xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision 5e3779b7ab02bc2689d68e0baff859fbb14f9cc9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 #include "ulp_template_db_tbl.h"
21 
/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700 /* PCP bits after shift+htons on LE */
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
#define ULP_UDP_PORT_VXLAN		4789 /* IANA-assigned VXLAN UDP dst port */
27 
28 /* Utility function to skip the void items. */
29 static inline int32_t
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
31 {
32 	if (!*item)
33 		return 0;
34 	if (increment)
35 		(*item)++;
36 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
37 		(*item)++;
38 	if (*item)
39 		return 1;
40 	return 0;
41 }
42 
43 /* Utility function to copy field spec items */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
46 			const void *buffer,
47 			uint32_t size)
48 {
49 	field->size = size;
50 	memcpy(field->spec, buffer, field->size);
51 	field++;
52 	return field;
53 }
54 
55 /* Utility function to update the field_bitmap */
56 static void
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
58 				   uint32_t idx,
59 				   enum bnxt_ulp_prsr_action prsr_act)
60 {
61 	struct ulp_rte_hdr_field *field;
62 
63 	field = &params->hdr_field[idx];
64 	if (ulp_bitmap_notzero(field->mask, field->size)) {
65 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
68 		/* Not exact match */
69 		if (!ulp_bitmap_is_ones(field->mask, field->size))
70 			ULP_COMP_FLD_IDX_WR(params,
71 					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
72 	} else {
73 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
74 	}
75 }
76 
/* Safely take the address of member y of x, or NULL when x is NULL. */
#define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
/*
 * Utility function to copy field spec and masks items.
 *
 * Writes the mask (unless NULL or MASK_IGNORE is requested) and then
 * the spec into hdr_field[*idx], updating the field bitmaps.  The spec
 * is only copied when a non-zero mask accompanies it, since a fully
 * masked-out spec cannot influence the match.  *idx is advanced by one.
 */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		/* bitmap update reads field->mask, so it must follow the copy */
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the protocol specifications only if mask is not null*/
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}
105 
/*
 * Reserve 'size' header-field slots for the caller.
 *
 * NOTE: the previous comment ("copy field spec and masks items") was a
 * copy-paste from the function above; this function only validates and
 * allocates index space.  On success *idx receives the first reserved
 * slot and params->field_idx advances by 'size'; returns -EINVAL when
 * the reservation would exceed BNXT_ULP_PROTO_HDR_MAX.
 */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}
120 
121 /*
122  * Function to handle the parsing of RTE Flows and placing
123  * the RTE flow items into the ulp structures.
124  */
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 *
 * Walks 'pattern' until RTE_FLOW_ITEM_TYPE_END, dispatching each item
 * to its registered per-protocol callback from either the standard
 * ulp_hdr_info table or the vendor table for BNXT-private item types.
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_ERROR when a callback fails,
 * or BNXT_TF_RC_PARSE_ERR for an unsupported item type.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	/* Slots below SVIF_NUM are reserved for the implicit SVIF match */
	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* BNXT vendor-specific items live past the standard range */
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			/* HIGIG2 is the highest standard item type known here */
			if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
174 
175 /*
176  * Function to handle the parsing of RTE Flows and placing
177  * the RTE flow actions into the ulp structures.
178  */
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 *
 * Mirrors bnxt_ulp_rte_parser_hdr_parse() for the action list: walks
 * 'actions' until RTE_FLOW_ACTION_TYPE_END and dispatches each action
 * through the standard or vendor action-info table.  Returns
 * BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR (both callback failures and
 * unsupported action types use BNXT_TF_RC_ERROR here).
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the items in the pattern */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		/* BNXT vendor-specific actions live past the standard range */
		if (action_item->type >=
		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the header information from bnxt actinfo table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			/* SHARED is the highest standard action type known here */
			if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
				goto act_parser_error;
			/* get the header information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}
225 
226 /*
227  * Function to handle the post processing of the computed
228  * fields for the interface.
229  */
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 *
 * For ingress flows, records the physical-port PARIF.  For egress
 * flows, records either the VF-function PARIF (when matching a VF
 * representor, which is also flagged) or the driver-function PARIF,
 * and flags PF match ports.  Errors only log and leave the computed
 * fields untouched.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}
299 
/*
 * Post-process a parsed (non-tunnel) flow.
 *
 * Derives several computed fields from the parsed state: flow-direction
 * bits in the header/action bitmaps, the VF-to-VF flag, the plain vs.
 * tunnel decrement-TTL flag, merges the fast-path header bits, records
 * the flow id, and refreshes the interface computed fields.  Always
 * returns 0.
 */
static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
361 
362 /*
363  * Function to handle the post processing of the parsing details
364  */
/*
 * Function to handle the post processing of the parsing details.
 * Currently only normal (non-tunnel-offload) flows need work here.
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}
370 
371 /*
372  * Function to compute the flow direction based on the match port details
373  */
374 static void
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
376 {
377 	enum bnxt_ulp_intf_type match_port_type;
378 
379 	/* Get the match port type */
380 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
382 
383 	/* If ingress flow and matchport is vf rep then dir is egress*/
384 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 				    BNXT_ULP_DIR_EGRESS);
388 	} else {
389 		/* Assign the input direction */
390 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 					    BNXT_ULP_DIR_INGRESS);
393 		else
394 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 					    BNXT_ULP_DIR_EGRESS);
396 	}
397 }
398 
399 /* Function to handle the parsing of RTE Flow item PF Header. */
400 static int32_t
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
402 			uint32_t ifindex,
403 			uint16_t mask,
404 			enum bnxt_ulp_direction_type item_dir)
405 {
406 	uint16_t svif;
407 	enum bnxt_ulp_direction_type dir;
408 	struct ulp_rte_hdr_field *hdr_field;
409 	enum bnxt_ulp_svif_type svif_type;
410 	enum bnxt_ulp_intf_type port_type;
411 
412 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
413 	    BNXT_ULP_INVALID_SVIF_VAL) {
414 		BNXT_TF_DBG(ERR,
415 			    "SVIF already set,multiple source not support'd\n");
416 		return BNXT_TF_RC_ERROR;
417 	}
418 
419 	/* Get port type details */
420 	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
421 	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
422 		BNXT_TF_DBG(ERR, "Invalid port type\n");
423 		return BNXT_TF_RC_ERROR;
424 	}
425 
426 	/* Update the match port type */
427 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
428 
429 	/* compute the direction */
430 	bnxt_ulp_rte_parser_direction_compute(params);
431 
432 	/* Get the computed direction */
433 	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
434 		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
435 	if (dir == BNXT_ULP_DIR_INGRESS &&
436 	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
437 		svif_type = BNXT_ULP_PHY_PORT_SVIF;
438 	} else {
439 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
440 		    item_dir != BNXT_ULP_DIR_EGRESS)
441 			svif_type = BNXT_ULP_VF_FUNC_SVIF;
442 		else
443 			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
444 	}
445 	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
446 			     &svif);
447 	svif = rte_cpu_to_be_16(svif);
448 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
449 	memcpy(hdr_field->spec, &svif, sizeof(svif));
450 	memcpy(hdr_field->mask, &mask, sizeof(mask));
451 	hdr_field->size = sizeof(svif);
452 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
453 			    rte_be_to_cpu_16(svif));
454 	return BNXT_TF_RC_SUCCESS;
455 }
456 
457 /* Function to handle the parsing of the RTE port id */
458 int32_t
459 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
460 {
461 	uint16_t port_id = 0;
462 	uint16_t svif_mask = 0xFFFF;
463 	uint32_t ifindex;
464 	int32_t rc = BNXT_TF_RC_ERROR;
465 
466 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
467 	    BNXT_ULP_INVALID_SVIF_VAL)
468 		return BNXT_TF_RC_SUCCESS;
469 
470 	/* SVIF not set. So get the port id */
471 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
472 
473 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
474 					      port_id,
475 					      &ifindex)) {
476 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
477 		return rc;
478 	}
479 
480 	/* Update the SVIF details */
481 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
482 				     BNXT_ULP_DIR_INVALID);
483 	return rc;
484 }
485 
486 /* Function to handle the implicit action port id */
487 int32_t
488 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
489 {
490 	struct rte_flow_action action_item = {0};
491 	struct rte_flow_action_port_id port_id = {0};
492 
493 	/* Read the action port set bit */
494 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
495 		/* Already set, so just exit */
496 		return BNXT_TF_RC_SUCCESS;
497 	}
498 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
499 	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
500 	action_item.conf = &port_id;
501 
502 	/* Update the action port based on incoming port */
503 	ulp_rte_port_act_handler(&action_item, params);
504 
505 	/* Reset the action port set bit */
506 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
507 	return BNXT_TF_RC_SUCCESS;
508 }
509 
510 /* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
/*
 * Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 *
 * Extracts the ethdev id and mask from the item, maps the item type to
 * a direction hint (PORT_ID carries none, PORT_REPRESENTOR implies
 * ingress, REPRESENTED_PORT implies egress), converts the DPDK port to
 * a bnxt ifindex and installs the SVIF match.  Both spec and mask are
 * mandatory.
 */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		/* No direction implied; the computed direction is used */
		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		/* Matching a representor implies an ingress flow */
		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		/* Matching the represented port implies an egress flow */
		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
573 
574 /* Function to handle the update of proto header based on field values */
575 static void
576 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
577 			     uint16_t type, uint32_t in_flag)
578 {
579 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
580 		if (in_flag) {
581 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
582 				       BNXT_ULP_HDR_BIT_I_IPV4);
583 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
584 		} else {
585 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
586 				       BNXT_ULP_HDR_BIT_O_IPV4);
587 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
588 		}
589 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
590 		if (in_flag) {
591 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
592 				       BNXT_ULP_HDR_BIT_I_IPV6);
593 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
594 		} else {
595 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
596 				       BNXT_ULP_HDR_BIT_O_IPV6);
597 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
598 		}
599 	}
600 }
601 
602 /* Internal Function to identify broadcast or multicast packets */
603 static int32_t
604 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
605 {
606 	if (rte_is_multicast_ether_addr(eth_addr) ||
607 	    rte_is_broadcast_ether_addr(eth_addr)) {
608 		BNXT_TF_DBG(DEBUG,
609 			    "No support for bcast or mcast addr offload\n");
610 		return 1;
611 	}
612 	return 0;
613 }
614 
615 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 *
 * Rejects broadcast/multicast MAC specs, copies dst MAC, src MAC and
 * ethertype (in that fixed order, matching the template field layout)
 * into hdr_field, and marks the header as inner or outer Ethernet
 * depending on what has already been parsed.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->type;
	}

	/* Reserve hdr_field slots for all ethernet fields */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	/* Remember where the DMAC landed for tunnel-offload rewrites */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, dst.addr_bytes),
			      ulp_deference_struct(eth_mask, dst.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, src.addr_bytes),
			      ulp_deference_struct(eth_mask, src.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ethertype is stored but excluded from the signature match */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, type),
			      ulp_deference_struct(eth_mask, type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer header exists already, so this one is inner */
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
690 
691 /* Function to handle the parsing of RTE Flow item Vlan Header. */
/*
 * Function to handle the parsing of RTE Flow item Vlan Header.
 *
 * Splits the TCI into priority (PCP) and VLAN id, stores both plus the
 * inner ethertype in hdr_field, and updates the outer/inner single/
 * double vlan-tag accounting based on which Ethernet headers have been
 * seen so far.  At most two tags per Ethernet layer are supported, and
 * a VLAN item must follow an Ethernet item.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		/* Split TCI: top 3 bits are PCP, low 12 bits the VLAN id */
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	/* Reserve hdr_field slots for all vlan fields */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
	/*
	 * The priority field is ignored since OVS is setting it as
	 * wild card match and it is not supported. This is a work
	 * around and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	/* Inner ethertype is stored but excluded from the signature match */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, inner_type),
			      ulp_deference_struct(vlan_mask, inner_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* First tag on the outer Ethernet header */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* Second (inner of the pair) tag on the outer Ethernet header */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First tag on the inner Ethernet header */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* Second tag on the inner Ethernet header */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
842 
843 /* Function to handle the update of proto header based on field values */
/*
 * Function to handle the update of proto header based on field values.
 *
 * Maps an IP next-protocol number to the corresponding L4/tunnel
 * header bits (UDP, TCP, GRE, ICMP) and, for any non-zero protocol,
 * records it in the L3 protocol-id computed fields for the inner or
 * outer header as indicated by in_flag.
 */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		/* Inside an L3 tunnel ICMP counts as an inner header */
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}
	/* Record any non-zero protocol id for the matching header level */
	if (proto) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
					    proto);
		} else {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
					    proto);
		}
	}
}
896 
/* Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * Copies each IPv4 spec/mask field into params->hdr_field in template
 * order, sets the inner or outer IPv4 bits in the header bitmap along
 * with the matching computed fields, and hands the (masked) next
 * protocol id to ulp_rte_l3_proto_type_update().
 *
 * item   [in]     flow item carrying the ipv4 spec and mask; either
 *                 pointer may be NULL for a wildcard match.
 * params [in,out] parser state being populated.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3
 * header is seen or the header field array would overflow.
 *
 * NOTE: each ulp_rte_prsr_fld_mask() call advances 'idx'; the call
 * order must not change or the resulting field offsets will no longer
 * match the matcher templates.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Reserve room in hdr_field for all IPv4 fields up front */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS is setting it as wild card
	 * match and it is not supported. This is a work around and
	 * shall be addressed in the future.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	/* fragment offset mask is ignored, like tos above */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      ULP_PRSR_ACT_MATCH_IGNORE);
	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* remember where dst ip landed; used for tunnel offload below */
	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask)
		proto &= ipv4_mask->hdr.next_proto_id;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1030 
/* Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Splits vtc_flow into version/TC/flow-label, copies all IPv6
 * spec/mask fields into params->hdr_field in template order, sets the
 * inner or outer IPv6 bitmap bits and computed fields, and forwards
 * the (masked) next header value to ulp_rte_l3_proto_type_update().
 *
 * item   [in]     flow item carrying the ipv6 spec and mask; either
 *                 pointer may be NULL for a wildcard match.
 * params [in,out] parser state being populated.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3
 * header is seen or the header field array would overflow.
 *
 * NOTE: each ulp_rte_prsr_fld_mask() call advances 'idx'; the call
 * order must not change or the resulting field offsets will no longer
 * match the matcher templates.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Reserve room in hdr_field for all IPv6 fields up front */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
	}

	/* version, TC and flow label all use the vtc_flow field size */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label field are ignored since OVS is
	 * setting it for match and it is not supported.
	 * This is a work around and
	 * shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* remember where dst ip landed; used for tunnel offload below */
	dip_idx =  idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1151 
1152 /* Function to handle the update of proto header based on field values */
1153 static void
1154 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
1155 			     uint16_t src_port, uint16_t src_mask,
1156 			     uint16_t dst_port, uint16_t dst_mask,
1157 			     enum bnxt_ulp_hdr_bit hdr_bit)
1158 {
1159 	switch (hdr_bit) {
1160 	case BNXT_ULP_HDR_BIT_I_UDP:
1161 	case BNXT_ULP_HDR_BIT_I_TCP:
1162 		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1163 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1164 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1165 				    (uint64_t)rte_be_to_cpu_16(src_port));
1166 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1167 				    (uint64_t)rte_be_to_cpu_16(dst_port));
1168 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
1169 				    (uint64_t)rte_be_to_cpu_16(src_mask));
1170 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
1171 				    (uint64_t)rte_be_to_cpu_16(dst_mask));
1172 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1173 				    1);
1174 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1175 				    !!(src_port & src_mask));
1176 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1177 				    !!(dst_port & dst_mask));
1178 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1179 				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
1180 				    IPPROTO_UDP : IPPROTO_TCP);
1181 		break;
1182 	case BNXT_ULP_HDR_BIT_O_UDP:
1183 	case BNXT_ULP_HDR_BIT_O_TCP:
1184 		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1185 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1186 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1187 				    (uint64_t)rte_be_to_cpu_16(src_port));
1188 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1189 				    (uint64_t)rte_be_to_cpu_16(dst_port));
1190 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
1191 				    (uint64_t)rte_be_to_cpu_16(src_mask));
1192 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1193 				    (uint64_t)rte_be_to_cpu_16(dst_mask));
1194 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1195 				    1);
1196 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1197 				    !!(src_port & src_mask));
1198 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1199 				    !!(dst_port & dst_mask));
1200 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1201 				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
1202 				    IPPROTO_UDP : IPPROTO_TCP);
1203 		break;
1204 	default:
1205 		break;
1206 	}
1207 
1208 	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
1209 	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1210 		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1211 			       BNXT_ULP_HDR_BIT_T_VXLAN);
1212 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1213 	}
1214 }
1215 
/* Function to handle the parsing of RTE Flow item UDP Header.
 *
 * Copies the UDP spec/mask fields into params->hdr_field in template
 * order, then records the ports via ulp_rte_l4_proto_type_update()
 * against either the inner or outer L4 header.
 *
 * item   [in]     flow item carrying the udp spec and mask; either
 *                 pointer may be NULL for a wildcard match.
 * params [in,out] parser state being populated.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L4
 * header is seen or the header field array would overflow.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	/* validate there are no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	/* Reserve room in hdr_field for all UDP fields up front */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1290 
/* Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the TCP spec/mask fields into params->hdr_field in template
 * order, then records the ports via ulp_rte_l4_proto_type_update()
 * against either the inner or outer L4 header.
 *
 * item   [in]     flow item carrying the tcp spec and mask; either
 *                 pointer may be NULL for a wildcard match.
 * params [in,out] parser state being populated.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L4
 * header is seen or the header field array would overflow.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	/* validate there are no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	/* Reserve room in hdr_field for all TCP fields up front */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1395 
1396 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1397 int32_t
1398 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1399 			  struct ulp_rte_parser_params *params)
1400 {
1401 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1402 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1403 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1404 	uint32_t idx = 0;
1405 	uint32_t size;
1406 
1407 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1408 					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1409 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1410 		return BNXT_TF_RC_ERROR;
1411 	}
1412 
1413 	/*
1414 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1415 	 * header fields
1416 	 */
1417 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1418 	ulp_rte_prsr_fld_mask(params, &idx, size,
1419 			      ulp_deference_struct(vxlan_spec, flags),
1420 			      ulp_deference_struct(vxlan_mask, flags),
1421 			      ULP_PRSR_ACT_DEFAULT);
1422 
1423 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1424 	ulp_rte_prsr_fld_mask(params, &idx, size,
1425 			      ulp_deference_struct(vxlan_spec, rsvd0),
1426 			      ulp_deference_struct(vxlan_mask, rsvd0),
1427 			      ULP_PRSR_ACT_DEFAULT);
1428 
1429 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1430 	ulp_rte_prsr_fld_mask(params, &idx, size,
1431 			      ulp_deference_struct(vxlan_spec, vni),
1432 			      ulp_deference_struct(vxlan_mask, vni),
1433 			      ULP_PRSR_ACT_DEFAULT);
1434 
1435 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1436 	ulp_rte_prsr_fld_mask(params, &idx, size,
1437 			      ulp_deference_struct(vxlan_spec, rsvd1),
1438 			      ulp_deference_struct(vxlan_mask, rsvd1),
1439 			      ULP_PRSR_ACT_DEFAULT);
1440 
1441 	/* Update the hdr_bitmap with vxlan */
1442 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1443 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1444 	return BNXT_TF_RC_SUCCESS;
1445 }
1446 
1447 /* Function to handle the parsing of RTE Flow item GRE Header. */
1448 int32_t
1449 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1450 			struct ulp_rte_parser_params *params)
1451 {
1452 	const struct rte_flow_item_gre *gre_spec = item->spec;
1453 	const struct rte_flow_item_gre *gre_mask = item->mask;
1454 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1455 	uint32_t idx = 0;
1456 	uint32_t size;
1457 
1458 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1459 					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1460 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1461 		return BNXT_TF_RC_ERROR;
1462 	}
1463 
1464 	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1465 	ulp_rte_prsr_fld_mask(params, &idx, size,
1466 			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
1467 			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
1468 			      ULP_PRSR_ACT_DEFAULT);
1469 
1470 	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1471 	ulp_rte_prsr_fld_mask(params, &idx, size,
1472 			      ulp_deference_struct(gre_spec, protocol),
1473 			      ulp_deference_struct(gre_mask, protocol),
1474 			      ULP_PRSR_ACT_DEFAULT);
1475 
1476 	/* Update the hdr_bitmap with GRE */
1477 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1478 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1479 	return BNXT_TF_RC_SUCCESS;
1480 }
1481 
/* Function to handle the parsing of RTE Flow item ANY. */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* ANY matches everything; nothing to record in the parser state. */
	return BNXT_TF_RC_SUCCESS;
}
1489 
1490 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1491 int32_t
1492 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1493 			 struct ulp_rte_parser_params *params)
1494 {
1495 	const struct rte_flow_item_icmp *icmp_spec = item->spec;
1496 	const struct rte_flow_item_icmp *icmp_mask = item->mask;
1497 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1498 	uint32_t idx = 0;
1499 	uint32_t size;
1500 
1501 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1502 					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1503 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1504 		return BNXT_TF_RC_ERROR;
1505 	}
1506 
1507 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1508 	ulp_rte_prsr_fld_mask(params, &idx, size,
1509 			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
1510 			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
1511 			      ULP_PRSR_ACT_DEFAULT);
1512 
1513 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1514 	ulp_rte_prsr_fld_mask(params, &idx, size,
1515 			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
1516 			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
1517 			      ULP_PRSR_ACT_DEFAULT);
1518 
1519 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1520 	ulp_rte_prsr_fld_mask(params, &idx, size,
1521 			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1522 			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1523 			      ULP_PRSR_ACT_DEFAULT);
1524 
1525 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1526 	ulp_rte_prsr_fld_mask(params, &idx, size,
1527 			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1528 			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1529 			      ULP_PRSR_ACT_DEFAULT);
1530 
1531 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1532 	ulp_rte_prsr_fld_mask(params, &idx, size,
1533 			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1534 			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1535 			      ULP_PRSR_ACT_DEFAULT);
1536 
1537 	/* Update the hdr_bitmap with ICMP */
1538 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1539 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1540 	else
1541 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1542 	return BNXT_TF_RC_SUCCESS;
1543 }
1544 
1545 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
1546 int32_t
1547 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1548 			  struct ulp_rte_parser_params *params)
1549 {
1550 	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1551 	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1552 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1553 	uint32_t idx = 0;
1554 	uint32_t size;
1555 
1556 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1557 					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1558 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1559 		return BNXT_TF_RC_ERROR;
1560 	}
1561 
1562 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1563 	ulp_rte_prsr_fld_mask(params, &idx, size,
1564 			      ulp_deference_struct(icmp_spec, type),
1565 			      ulp_deference_struct(icmp_mask, type),
1566 			      ULP_PRSR_ACT_DEFAULT);
1567 
1568 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1569 	ulp_rte_prsr_fld_mask(params, &idx, size,
1570 			      ulp_deference_struct(icmp_spec, code),
1571 			      ulp_deference_struct(icmp_mask, code),
1572 			      ULP_PRSR_ACT_DEFAULT);
1573 
1574 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1575 	ulp_rte_prsr_fld_mask(params, &idx, size,
1576 			      ulp_deference_struct(icmp_spec, checksum),
1577 			      ulp_deference_struct(icmp_mask, checksum),
1578 			      ULP_PRSR_ACT_DEFAULT);
1579 
1580 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1581 		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1582 		return BNXT_TF_RC_ERROR;
1583 	}
1584 
1585 	/* Update the hdr_bitmap with ICMP */
1586 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1587 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1588 	else
1589 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1590 	return BNXT_TF_RC_SUCCESS;
1591 }
1592 
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID items carry no match criteria; nothing to parse. */
	return BNXT_TF_RC_SUCCESS;
}
1600 
/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID actions have no effect; nothing to record. */
	return BNXT_TF_RC_SUCCESS;
}
1608 
1609 /* Function to handle the parsing of RTE Flow action Mark Header. */
1610 int32_t
1611 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1612 			 struct ulp_rte_parser_params *param)
1613 {
1614 	const struct rte_flow_action_mark *mark;
1615 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1616 	uint32_t mark_id;
1617 
1618 	mark = action_item->conf;
1619 	if (mark) {
1620 		mark_id = tfp_cpu_to_be_32(mark->id);
1621 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1622 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1623 
1624 		/* Update the hdr_bitmap with vxlan */
1625 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1626 		return BNXT_TF_RC_SUCCESS;
1627 	}
1628 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1629 	return BNXT_TF_RC_ERROR;
1630 }
1631 
1632 /* Function to handle the parsing of RTE Flow action RSS Header. */
1633 int32_t
1634 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1635 			struct ulp_rte_parser_params *param)
1636 {
1637 	const struct rte_flow_action_rss *rss;
1638 	struct ulp_rte_act_prop *ap = &param->act_prop;
1639 
1640 	if (action_item == NULL || action_item->conf == NULL) {
1641 		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1642 		return BNXT_TF_RC_ERROR;
1643 	}
1644 
1645 	rss = action_item->conf;
1646 	/* Copy the rss into the specific action properties */
1647 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1648 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1649 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1650 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1651 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1652 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1653 
1654 	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1655 		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1656 		return BNXT_TF_RC_ERROR;
1657 	}
1658 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1659 	       rss->key_len);
1660 
1661 	/* set the RSS action header bit */
1662 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1663 
1664 	return BNXT_TF_RC_SUCCESS;
1665 }
1666 
1667 /* Function to handle the parsing of RTE Flow item eth Header. */
1668 static void
1669 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1670 			    const struct rte_flow_item_eth *eth_spec)
1671 {
1672 	struct ulp_rte_hdr_field *field;
1673 	uint32_t size;
1674 
1675 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1676 	size = sizeof(eth_spec->dst.addr_bytes);
1677 	field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1678 
1679 	size = sizeof(eth_spec->src.addr_bytes);
1680 	field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1681 
1682 	size = sizeof(eth_spec->type);
1683 	field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);
1684 
1685 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1686 }
1687 
1688 /* Function to handle the parsing of RTE Flow item vlan Header. */
1689 static void
1690 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1691 			     const struct rte_flow_item_vlan *vlan_spec,
1692 			     uint32_t inner)
1693 {
1694 	struct ulp_rte_hdr_field *field;
1695 	uint32_t size;
1696 
1697 	if (!inner) {
1698 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1699 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1700 			       BNXT_ULP_HDR_BIT_OO_VLAN);
1701 	} else {
1702 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1703 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1704 			       BNXT_ULP_HDR_BIT_OI_VLAN);
1705 	}
1706 
1707 	size = sizeof(vlan_spec->tci);
1708 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1709 
1710 	size = sizeof(vlan_spec->inner_type);
1711 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1712 }
1713 
1714 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1715 static void
1716 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1717 			     const struct rte_flow_item_ipv4 *ip)
1718 {
1719 	struct ulp_rte_hdr_field *field;
1720 	uint32_t size;
1721 	uint8_t val8;
1722 
1723 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1724 	size = sizeof(ip->hdr.version_ihl);
1725 	if (!ip->hdr.version_ihl)
1726 		val8 = RTE_IPV4_VHL_DEF;
1727 	else
1728 		val8 = ip->hdr.version_ihl;
1729 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1730 
1731 	size = sizeof(ip->hdr.type_of_service);
1732 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1733 
1734 	size = sizeof(ip->hdr.packet_id);
1735 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1736 
1737 	size = sizeof(ip->hdr.fragment_offset);
1738 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1739 
1740 	size = sizeof(ip->hdr.time_to_live);
1741 	if (!ip->hdr.time_to_live)
1742 		val8 = BNXT_ULP_DEFAULT_TTL;
1743 	else
1744 		val8 = ip->hdr.time_to_live;
1745 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1746 
1747 	size = sizeof(ip->hdr.next_proto_id);
1748 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1749 
1750 	size = sizeof(ip->hdr.src_addr);
1751 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1752 
1753 	size = sizeof(ip->hdr.dst_addr);
1754 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1755 
1756 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1757 }
1758 
1759 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1760 static void
1761 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1762 			     const struct rte_flow_item_ipv6 *ip)
1763 {
1764 	struct ulp_rte_hdr_field *field;
1765 	uint32_t size;
1766 	uint32_t val32;
1767 	uint8_t val8;
1768 
1769 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1770 	size = sizeof(ip->hdr.vtc_flow);
1771 	if (!ip->hdr.vtc_flow)
1772 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1773 	else
1774 		val32 = ip->hdr.vtc_flow;
1775 	field = ulp_rte_parser_fld_copy(field, &val32, size);
1776 
1777 	size = sizeof(ip->hdr.proto);
1778 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1779 
1780 	size = sizeof(ip->hdr.hop_limits);
1781 	if (!ip->hdr.hop_limits)
1782 		val8 = BNXT_ULP_DEFAULT_TTL;
1783 	else
1784 		val8 = ip->hdr.hop_limits;
1785 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1786 
1787 	size = sizeof(ip->hdr.src_addr);
1788 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1789 
1790 	size = sizeof(ip->hdr.dst_addr);
1791 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1792 
1793 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1794 }
1795 
1796 /* Function to handle the parsing of RTE Flow item UDP Header. */
1797 static void
1798 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1799 			    const struct rte_flow_item_udp *udp_spec)
1800 {
1801 	struct ulp_rte_hdr_field *field;
1802 	uint32_t size;
1803 	uint8_t type = IPPROTO_UDP;
1804 
1805 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1806 	size = sizeof(udp_spec->hdr.src_port);
1807 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1808 
1809 	size = sizeof(udp_spec->hdr.dst_port);
1810 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1811 
1812 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1813 
1814 	/* Update thhe ip header protocol */
1815 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1816 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1817 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1818 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1819 }
1820 
1821 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1822 static void
1823 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1824 			      struct rte_flow_item_vxlan *vxlan_spec)
1825 {
1826 	struct ulp_rte_hdr_field *field;
1827 	uint32_t size;
1828 
1829 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1830 	size = sizeof(vxlan_spec->flags);
1831 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1832 
1833 	size = sizeof(vxlan_spec->rsvd0);
1834 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1835 
1836 	size = sizeof(vxlan_spec->vni);
1837 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1838 
1839 	size = sizeof(vxlan_spec->rsvd1);
1840 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1841 
1842 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1843 }
1844 
1845 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1846 int32_t
1847 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1848 				struct ulp_rte_parser_params *params)
1849 {
1850 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
1851 	const struct rte_flow_item *item;
1852 	const struct rte_flow_item_ipv4 *ipv4_spec;
1853 	const struct rte_flow_item_ipv6 *ipv6_spec;
1854 	struct rte_flow_item_vxlan vxlan_spec;
1855 	uint32_t vlan_num = 0, vlan_size = 0;
1856 	uint32_t ip_size = 0, ip_type = 0;
1857 	uint32_t vxlan_size = 0;
1858 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1859 	struct ulp_rte_act_prop *ap = &params->act_prop;
1860 
1861 	vxlan_encap = action_item->conf;
1862 	if (!vxlan_encap) {
1863 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1864 		return BNXT_TF_RC_ERROR;
1865 	}
1866 
1867 	item = vxlan_encap->definition;
1868 	if (!item) {
1869 		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1870 		return BNXT_TF_RC_ERROR;
1871 	}
1872 
1873 	if (!ulp_rte_item_skip_void(&item, 0))
1874 		return BNXT_TF_RC_ERROR;
1875 
1876 	/* must have ethernet header */
1877 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1878 		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1879 		return BNXT_TF_RC_ERROR;
1880 	}
1881 
1882 	/* Parse the ethernet header */
1883 	if (item->spec)
1884 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
1885 
1886 	/* Goto the next item */
1887 	if (!ulp_rte_item_skip_void(&item, 1))
1888 		return BNXT_TF_RC_ERROR;
1889 
1890 	/* May have vlan header */
1891 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1892 		vlan_num++;
1893 		if (item->spec)
1894 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
1895 
1896 		if (!ulp_rte_item_skip_void(&item, 1))
1897 			return BNXT_TF_RC_ERROR;
1898 	}
1899 
1900 	/* may have two vlan headers */
1901 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1902 		vlan_num++;
1903 		if (item->spec)
1904 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
1905 
1906 		if (!ulp_rte_item_skip_void(&item, 1))
1907 			return BNXT_TF_RC_ERROR;
1908 	}
1909 
1910 	/* Update the vlan count and size of more than one */
1911 	if (vlan_num) {
1912 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1913 		vlan_num = tfp_cpu_to_be_32(vlan_num);
1914 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1915 		       &vlan_num,
1916 		       sizeof(uint32_t));
1917 		vlan_size = tfp_cpu_to_be_32(vlan_size);
1918 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1919 		       &vlan_size,
1920 		       sizeof(uint32_t));
1921 	}
1922 
1923 	/* L3 must be IPv4, IPv6 */
1924 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1925 		ipv4_spec = item->spec;
1926 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1927 
1928 		/* Update the ip size details */
1929 		ip_size = tfp_cpu_to_be_32(ip_size);
1930 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1931 		       &ip_size, sizeof(uint32_t));
1932 
1933 		/* update the ip type */
1934 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1935 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1936 		       &ip_type, sizeof(uint32_t));
1937 
1938 		/* update the computed field to notify it is ipv4 header */
1939 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1940 				    1);
1941 		if (ipv4_spec)
1942 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
1943 
1944 		if (!ulp_rte_item_skip_void(&item, 1))
1945 			return BNXT_TF_RC_ERROR;
1946 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1947 		ipv6_spec = item->spec;
1948 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1949 
1950 		/* Update the ip size details */
1951 		ip_size = tfp_cpu_to_be_32(ip_size);
1952 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1953 		       &ip_size, sizeof(uint32_t));
1954 
1955 		 /* update the ip type */
1956 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1957 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1958 		       &ip_type, sizeof(uint32_t));
1959 
1960 		/* update the computed field to notify it is ipv6 header */
1961 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1962 				    1);
1963 		if (ipv6_spec)
1964 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
1965 
1966 		if (!ulp_rte_item_skip_void(&item, 1))
1967 			return BNXT_TF_RC_ERROR;
1968 	} else {
1969 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1970 		return BNXT_TF_RC_ERROR;
1971 	}
1972 
1973 	/* L4 is UDP */
1974 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1975 		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1976 		return BNXT_TF_RC_ERROR;
1977 	}
1978 	if (item->spec)
1979 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
1980 
1981 	if (!ulp_rte_item_skip_void(&item, 1))
1982 		return BNXT_TF_RC_ERROR;
1983 
1984 	/* Finally VXLAN */
1985 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1986 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1987 		return BNXT_TF_RC_ERROR;
1988 	}
1989 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
1990 	/* copy the vxlan details */
1991 	memcpy(&vxlan_spec, item->spec, vxlan_size);
1992 	vxlan_spec.flags = 0x08;
1993 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1994 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1995 	       &vxlan_size, sizeof(uint32_t));
1996 
1997 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
1998 
1999 	/* update the hdr_bitmap with vxlan */
2000 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2001 	return BNXT_TF_RC_SUCCESS;
2002 }
2003 
2004 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
2005 int32_t
2006 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2007 				__rte_unused,
2008 				struct ulp_rte_parser_params *params)
2009 {
2010 	/* update the hdr_bitmap with vxlan */
2011 	ULP_BITMAP_SET(params->act_bitmap.bits,
2012 		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2013 	/* Update computational field with tunnel decap info */
2014 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2015 	return BNXT_TF_RC_SUCCESS;
2016 }
2017 
2018 /* Function to handle the parsing of RTE Flow action drop Header. */
2019 int32_t
2020 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2021 			 struct ulp_rte_parser_params *params)
2022 {
2023 	/* Update the hdr_bitmap with drop */
2024 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2025 	return BNXT_TF_RC_SUCCESS;
2026 }
2027 
2028 /* Function to handle the parsing of RTE Flow action count. */
2029 int32_t
2030 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2031 			  struct ulp_rte_parser_params *params)
2032 {
2033 	const struct rte_flow_action_count *act_count;
2034 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2035 
2036 	act_count = action_item->conf;
2037 	if (act_count) {
2038 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2039 		       &act_count->id,
2040 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2041 	}
2042 
2043 	/* Update the hdr_bitmap with count */
2044 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2045 	return BNXT_TF_RC_SUCCESS;
2046 }
2047 
2048 /* Function to handle the parsing of action ports. */
2049 static int32_t
2050 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2051 			    uint32_t ifindex,
2052 			    enum bnxt_ulp_direction_type act_dir)
2053 {
2054 	enum bnxt_ulp_direction_type dir;
2055 	uint16_t pid_s;
2056 	uint32_t pid;
2057 	struct ulp_rte_act_prop *act = &param->act_prop;
2058 	enum bnxt_ulp_intf_type port_type;
2059 	uint32_t vnic_type;
2060 
2061 	/* Get the direction */
2062 	/* If action implicitly specifies direction, use the specification. */
2063 	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
2064 		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
2065 		act_dir;
2066 	port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2067 	if (dir == BNXT_ULP_DIR_EGRESS &&
2068 	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
2069 		/* For egress direction, fill vport */
2070 		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2071 			return BNXT_TF_RC_ERROR;
2072 
2073 		pid = pid_s;
2074 		pid = rte_cpu_to_be_32(pid);
2075 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2076 		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2077 	} else {
2078 		/* For ingress direction, fill vnic */
2079 		/*
2080 		 * Action		Destination
2081 		 * ------------------------------------
2082 		 * PORT_REPRESENTOR	Driver Function
2083 		 * ------------------------------------
2084 		 * REPRESENTED_PORT	VF
2085 		 * ------------------------------------
2086 		 * PORT_ID		VF
2087 		 */
2088 		if (act_dir != BNXT_ULP_DIR_INGRESS &&
2089 		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2090 			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2091 		else
2092 			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2093 
2094 		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2095 						 vnic_type, &pid_s))
2096 			return BNXT_TF_RC_ERROR;
2097 
2098 		pid = pid_s;
2099 		pid = rte_cpu_to_be_32(pid);
2100 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2101 		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2102 	}
2103 
2104 	/* Update the action port set bit */
2105 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2106 	return BNXT_TF_RC_SUCCESS;
2107 }
2108 
2109 /* Function to handle the parsing of RTE Flow action PF. */
2110 int32_t
2111 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2112 		       struct ulp_rte_parser_params *params)
2113 {
2114 	uint32_t port_id;
2115 	uint32_t ifindex;
2116 	enum bnxt_ulp_intf_type intf_type;
2117 
2118 	/* Get the port id of the current device */
2119 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2120 
2121 	/* Get the port db ifindex */
2122 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2123 					      &ifindex)) {
2124 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2125 		return BNXT_TF_RC_ERROR;
2126 	}
2127 
2128 	/* Check the port is PF port */
2129 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2130 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2131 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2132 		return BNXT_TF_RC_ERROR;
2133 	}
2134 	/* Update the action properties */
2135 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2136 	return ulp_rte_parser_act_port_set(params, ifindex,
2137 					   BNXT_ULP_DIR_INVALID);
2138 }
2139 
2140 /* Function to handle the parsing of RTE Flow action VF. */
2141 int32_t
2142 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2143 		       struct ulp_rte_parser_params *params)
2144 {
2145 	const struct rte_flow_action_vf *vf_action;
2146 	enum bnxt_ulp_intf_type intf_type;
2147 	uint32_t ifindex;
2148 	struct bnxt *bp;
2149 
2150 	vf_action = action_item->conf;
2151 	if (!vf_action) {
2152 		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2153 		return BNXT_TF_RC_PARSE_ERR;
2154 	}
2155 
2156 	if (vf_action->original) {
2157 		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2158 		return BNXT_TF_RC_PARSE_ERR;
2159 	}
2160 
2161 	bp = bnxt_pmd_get_bp(params->port_id);
2162 	if (bp == NULL) {
2163 		BNXT_TF_DBG(ERR, "Invalid bp\n");
2164 		return BNXT_TF_RC_ERROR;
2165 	}
2166 
2167 	/* vf_action->id is a logical number which in this case is an
2168 	 * offset from the first VF. So, to get the absolute VF id, the
2169 	 * offset must be added to the absolute first vf id of that port.
2170 	 */
2171 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2172 						 bp->first_vf_id +
2173 						 vf_action->id,
2174 						 &ifindex)) {
2175 		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2176 		return BNXT_TF_RC_ERROR;
2177 	}
2178 	/* Check the port is VF port */
2179 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2180 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2181 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2182 		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2183 		return BNXT_TF_RC_ERROR;
2184 	}
2185 
2186 	/* Update the action properties */
2187 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2188 	return ulp_rte_parser_act_port_set(params, ifindex,
2189 					   BNXT_ULP_DIR_INVALID);
2190 }
2191 
2192 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2193 int32_t
2194 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2195 			 struct ulp_rte_parser_params *param)
2196 {
2197 	uint32_t ethdev_id;
2198 	uint32_t ifindex;
2199 	enum bnxt_ulp_intf_type intf_type;
2200 	enum bnxt_ulp_direction_type act_dir;
2201 
2202 	if (!act_item->conf) {
2203 		BNXT_TF_DBG(ERR,
2204 			    "ParseErr: Invalid Argument\n");
2205 		return BNXT_TF_RC_PARSE_ERR;
2206 	}
2207 	switch (act_item->type) {
2208 	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2209 		const struct rte_flow_action_port_id *port_id = act_item->conf;
2210 
2211 		if (port_id->original) {
2212 			BNXT_TF_DBG(ERR,
2213 				    "ParseErr:Portid Original not supported\n");
2214 			return BNXT_TF_RC_PARSE_ERR;
2215 		}
2216 		ethdev_id = port_id->id;
2217 		act_dir = BNXT_ULP_DIR_INVALID;
2218 		break;
2219 	}
2220 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2221 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2222 
2223 		ethdev_id = ethdev->port_id;
2224 		act_dir = BNXT_ULP_DIR_INGRESS;
2225 		break;
2226 	}
2227 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2228 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2229 
2230 		ethdev_id = ethdev->port_id;
2231 		act_dir = BNXT_ULP_DIR_EGRESS;
2232 		break;
2233 	}
2234 	default:
2235 		BNXT_TF_DBG(ERR, "Unknown port action\n");
2236 		return BNXT_TF_RC_ERROR;
2237 	}
2238 
2239 	/* Get the port db ifindex */
2240 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2241 					      &ifindex)) {
2242 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2243 		return BNXT_TF_RC_ERROR;
2244 	}
2245 
2246 	/* Get the intf type */
2247 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2248 	if (!intf_type) {
2249 		BNXT_TF_DBG(ERR, "Invalid port type\n");
2250 		return BNXT_TF_RC_ERROR;
2251 	}
2252 
2253 	/* Set the action port */
2254 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2255 	return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
2256 }
2257 
2258 /* Function to handle the parsing of RTE Flow action phy_port. */
2259 int32_t
2260 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2261 			     struct ulp_rte_parser_params *prm)
2262 {
2263 	const struct rte_flow_action_phy_port *phy_port;
2264 	uint32_t pid;
2265 	int32_t rc;
2266 	uint16_t pid_s;
2267 	enum bnxt_ulp_direction_type dir;
2268 
2269 	phy_port = action_item->conf;
2270 	if (!phy_port) {
2271 		BNXT_TF_DBG(ERR,
2272 			    "ParseErr: Invalid Argument\n");
2273 		return BNXT_TF_RC_PARSE_ERR;
2274 	}
2275 
2276 	if (phy_port->original) {
2277 		BNXT_TF_DBG(ERR,
2278 			    "Parse Err:Port Original not supported\n");
2279 		return BNXT_TF_RC_PARSE_ERR;
2280 	}
2281 	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2282 	if (dir != BNXT_ULP_DIR_EGRESS) {
2283 		BNXT_TF_DBG(ERR,
2284 			    "Parse Err:Phy ports are valid only for egress\n");
2285 		return BNXT_TF_RC_PARSE_ERR;
2286 	}
2287 	/* Get the physical port details from port db */
2288 	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2289 					    &pid_s);
2290 	if (rc) {
2291 		BNXT_TF_DBG(ERR, "Failed to get port details\n");
2292 		return -EINVAL;
2293 	}
2294 
2295 	pid = pid_s;
2296 	pid = rte_cpu_to_be_32(pid);
2297 	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2298 	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2299 
2300 	/* Update the action port set bit */
2301 	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2302 	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2303 			    BNXT_ULP_INTF_TYPE_PHY_PORT);
2304 	return BNXT_TF_RC_SUCCESS;
2305 }
2306 
2307 /* Function to handle the parsing of RTE Flow action pop vlan. */
2308 int32_t
2309 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2310 				struct ulp_rte_parser_params *params)
2311 {
2312 	/* Update the act_bitmap with pop */
2313 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2314 	return BNXT_TF_RC_SUCCESS;
2315 }
2316 
2317 /* Function to handle the parsing of RTE Flow action push vlan. */
2318 int32_t
2319 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2320 				 struct ulp_rte_parser_params *params)
2321 {
2322 	const struct rte_flow_action_of_push_vlan *push_vlan;
2323 	uint16_t ethertype;
2324 	struct ulp_rte_act_prop *act = &params->act_prop;
2325 
2326 	push_vlan = action_item->conf;
2327 	if (push_vlan) {
2328 		ethertype = push_vlan->ethertype;
2329 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2330 			BNXT_TF_DBG(ERR,
2331 				    "Parse Err: Ethertype not supported\n");
2332 			return BNXT_TF_RC_PARSE_ERR;
2333 		}
2334 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2335 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2336 		/* Update the hdr_bitmap with push vlan */
2337 		ULP_BITMAP_SET(params->act_bitmap.bits,
2338 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
2339 		return BNXT_TF_RC_SUCCESS;
2340 	}
2341 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2342 	return BNXT_TF_RC_ERROR;
2343 }
2344 
2345 /* Function to handle the parsing of RTE Flow action set vlan id. */
2346 int32_t
2347 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2348 				    struct ulp_rte_parser_params *params)
2349 {
2350 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2351 	uint32_t vid;
2352 	struct ulp_rte_act_prop *act = &params->act_prop;
2353 
2354 	vlan_vid = action_item->conf;
2355 	if (vlan_vid && vlan_vid->vlan_vid) {
2356 		vid = vlan_vid->vlan_vid;
2357 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2358 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2359 		/* Update the hdr_bitmap with vlan vid */
2360 		ULP_BITMAP_SET(params->act_bitmap.bits,
2361 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2362 		return BNXT_TF_RC_SUCCESS;
2363 	}
2364 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2365 	return BNXT_TF_RC_ERROR;
2366 }
2367 
2368 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2369 int32_t
2370 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2371 				    struct ulp_rte_parser_params *params)
2372 {
2373 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2374 	uint8_t pcp;
2375 	struct ulp_rte_act_prop *act = &params->act_prop;
2376 
2377 	vlan_pcp = action_item->conf;
2378 	if (vlan_pcp) {
2379 		pcp = vlan_pcp->vlan_pcp;
2380 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2381 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2382 		/* Update the hdr_bitmap with vlan vid */
2383 		ULP_BITMAP_SET(params->act_bitmap.bits,
2384 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2385 		return BNXT_TF_RC_SUCCESS;
2386 	}
2387 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2388 	return BNXT_TF_RC_ERROR;
2389 }
2390 
2391 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2392 int32_t
2393 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2394 				 struct ulp_rte_parser_params *params)
2395 {
2396 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2397 	struct ulp_rte_act_prop *act = &params->act_prop;
2398 
2399 	set_ipv4 = action_item->conf;
2400 	if (set_ipv4) {
2401 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2402 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2403 		/* Update the hdr_bitmap with set ipv4 src */
2404 		ULP_BITMAP_SET(params->act_bitmap.bits,
2405 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2406 		return BNXT_TF_RC_SUCCESS;
2407 	}
2408 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2409 	return BNXT_TF_RC_ERROR;
2410 }
2411 
2412 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2413 int32_t
2414 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2415 				 struct ulp_rte_parser_params *params)
2416 {
2417 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2418 	struct ulp_rte_act_prop *act = &params->act_prop;
2419 
2420 	set_ipv4 = action_item->conf;
2421 	if (set_ipv4) {
2422 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2423 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2424 		/* Update the hdr_bitmap with set ipv4 dst */
2425 		ULP_BITMAP_SET(params->act_bitmap.bits,
2426 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2427 		return BNXT_TF_RC_SUCCESS;
2428 	}
2429 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2430 	return BNXT_TF_RC_ERROR;
2431 }
2432 
2433 /* Function to handle the parsing of RTE Flow action set tp src.*/
2434 int32_t
2435 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2436 			       struct ulp_rte_parser_params *params)
2437 {
2438 	const struct rte_flow_action_set_tp *set_tp;
2439 	struct ulp_rte_act_prop *act = &params->act_prop;
2440 
2441 	set_tp = action_item->conf;
2442 	if (set_tp) {
2443 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2444 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2445 		/* Update the hdr_bitmap with set tp src */
2446 		ULP_BITMAP_SET(params->act_bitmap.bits,
2447 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
2448 		return BNXT_TF_RC_SUCCESS;
2449 	}
2450 
2451 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2452 	return BNXT_TF_RC_ERROR;
2453 }
2454 
2455 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2456 int32_t
2457 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2458 			       struct ulp_rte_parser_params *params)
2459 {
2460 	const struct rte_flow_action_set_tp *set_tp;
2461 	struct ulp_rte_act_prop *act = &params->act_prop;
2462 
2463 	set_tp = action_item->conf;
2464 	if (set_tp) {
2465 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2466 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2467 		/* Update the hdr_bitmap with set tp dst */
2468 		ULP_BITMAP_SET(params->act_bitmap.bits,
2469 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
2470 		return BNXT_TF_RC_SUCCESS;
2471 	}
2472 
2473 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2474 	return BNXT_TF_RC_ERROR;
2475 }
2476 
2477 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2478 int32_t
2479 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2480 			    struct ulp_rte_parser_params *params)
2481 {
2482 	/* Update the act_bitmap with dec ttl */
2483 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2484 	return BNXT_TF_RC_SUCCESS;
2485 }
2486 
2487 /* Function to handle the parsing of RTE Flow action JUMP */
2488 int32_t
2489 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2490 			 struct ulp_rte_parser_params *params)
2491 {
2492 	/* Update the act_bitmap with dec ttl */
2493 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2494 	return BNXT_TF_RC_SUCCESS;
2495 }
2496 
2497 int32_t
2498 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2499 			   struct ulp_rte_parser_params *params)
2500 {
2501 	const struct rte_flow_action_sample *sample;
2502 	int ret;
2503 
2504 	sample = action_item->conf;
2505 
2506 	/* if SAMPLE bit is set it means this sample action is nested within the
2507 	 * actions of another sample action; this is not allowed
2508 	 */
2509 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2510 			     BNXT_ULP_ACT_BIT_SAMPLE))
2511 		return BNXT_TF_RC_ERROR;
2512 
2513 	/* a sample action is only allowed as a shared action */
2514 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2515 			      BNXT_ULP_ACT_BIT_SHARED))
2516 		return BNXT_TF_RC_ERROR;
2517 
2518 	/* only a ratio of 1 i.e. 100% is supported */
2519 	if (sample->ratio != 1)
2520 		return BNXT_TF_RC_ERROR;
2521 
2522 	if (!sample->actions)
2523 		return BNXT_TF_RC_ERROR;
2524 
2525 	/* parse the nested actions for a sample action */
2526 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2527 	if (ret == BNXT_TF_RC_SUCCESS)
2528 		/* Update the act_bitmap with sample */
2529 		ULP_BITMAP_SET(params->act_bitmap.bits,
2530 			       BNXT_ULP_ACT_BIT_SAMPLE);
2531 
2532 	return ret;
2533 }
2534 
2535 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2536 int32_t
2537 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2538 				   struct ulp_rte_parser_params *params)
2539 {
2540 	/* Set the F1 flow header bit */
2541 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2542 	return ulp_rte_vxlan_decap_act_handler(action_item, params);
2543 }
2544 
/* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
				       struct ulp_rte_parser_params *params)
{
	/* The item contents are not inspected; only its presence matters. */
	RTE_SET_USED(item);
	/* Set the F2 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
	/* Reuse the decap action handler; it ignores its action argument. */
	return ulp_rte_vxlan_decap_act_handler(NULL, params);
2554 }
2555