xref: /dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c (revision d38febb08d57fec29fed27a2d12a507fc6fcdfa1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_log.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
12 
13 #include "bnxt.h"
14 #include "bnxt_ulp.h"
15 #include "bnxt_tf_common.h"
16 #include "tf_core.h"
17 #include "tf_ext_flow_handle.h"
18 
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "ulp_mark_mgr.h"
22 #include "ulp_fc_mgr.h"
23 #include "ulp_flow_db.h"
24 #include "ulp_mapper.h"
25 #include "ulp_port_db.h"
26 #include "ulp_tun.h"
27 #include "ulp_ha_mgr.h"
28 #include "bnxt_tf_pmd_shim.h"
29 
30 /* Linked list of all TF sessions. */
31 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
32 			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
33 
34 /* Mutex to synchronize bnxt_ulp_session_list operations. */
35 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
36 
37 /* Spin lock to protect context global list */
38 rte_spinlock_t bnxt_ulp_ctxt_lock;
39 TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
40 static struct cntx_list_entry_list ulp_cntx_list =
41 	TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
42 
43 /* Static function declarations */
44 static int32_t bnxt_ulp_cntxt_list_init(void);
45 static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
46 static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
47 
48 /*
49  * Allow the deletion of context only for the bnxt device that
50  * created the session.
51  */
52 bool
53 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
54 {
55 	if (!ulp_ctx || !ulp_ctx->cfg_data)
56 		return false;
57 
58 	if (!ulp_ctx->cfg_data->ref_cnt) {
59 		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
60 		return true;
61 	}
62 
63 	return false;
64 }
65 
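/* Get the ULP device id (Thor, Stingray or Whitney+) for the bnxt device. */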
66 static int32_t
67 bnxt_ulp_devid_get(struct bnxt *bp,
68 		   enum bnxt_ulp_device_id  *ulp_dev_id)
69 {
70 	if (BNXT_CHIP_P5(bp)) {
71 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
72 		return 0;
73 	}
74 
75 	if (BNXT_STINGRAY(bp))
76 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
77 	else
78 		/* Assuming Whitney */
79 		*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
80 
81 	return 0;
82 }
83 
84 struct bnxt_ulp_app_capabilities_info *
85 bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
86 {
87 	if (!num_entries)
88 		return NULL;
89 	*num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
90 	return ulp_app_cap_info_list;
91 }
92 
93 static struct bnxt_ulp_resource_resv_info *
94 bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
95 {
96 	if (num_entries == NULL)
97 		return NULL;
98 	*num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
99 	return ulp_app_resource_resv_list;
100 }
101 
102 struct bnxt_ulp_resource_resv_info *
103 bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
104 {
105 	if (!num_entries)
106 		return NULL;
107 	*num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
108 	return ulp_resource_resv_list;
109 }
110 
111 struct bnxt_ulp_glb_resource_info *
112 bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
113 {
114 	if (!num_entries)
115 		return NULL;
116 	*num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
117 	return ulp_app_glb_resource_tbl;
118 }
119 
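/*
 * Accumulate the named (app global) resources that match the context's
 * app id and device id into the TF session resource counts.
 */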
120 static int32_t
121 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
122 			      struct bnxt_ulp_glb_resource_info *info,
123 			      uint32_t num,
124 			      struct tf_session_resources *res)
125 {
126 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
127 	enum tf_dir dir;
128 	uint8_t app_id;
129 	int32_t rc = 0;
130 
131 	if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
132 		BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
133 		return -EINVAL;
134 	}
135 
136 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
137 	if (rc) {
138 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
139 		return -EINVAL;
140 	}
141 
142 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
143 	if (rc) {
144 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
145 		return -EINVAL;
146 	}
147 
148 	for (i = 0; i < num; i++) {
149 		if (dev_id != info[i].device_id || app_id != info[i].app_id)
150 			continue;
151 		dir = info[i].direction;
152 		res_type = info[i].resource_type;
153 
154 		switch (info[i].resource_func) {
155 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
156 			res->ident_cnt[dir].cnt[res_type]++;
157 			break;
158 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
159 			res->tbl_cnt[dir].cnt[res_type]++;
160 			break;
161 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
162 			res->tcam_cnt[dir].cnt[res_type]++;
163 			break;
164 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
165 			res->em_cnt[dir].cnt[res_type]++;
166 			break;
167 		default:
168 			BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n",
169 				    info[i].resource_func);
170 			continue;
171 		}
172 	}
173 
174 	return 0;
175 }
176 
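/*
 * Fill the TF session resource counts from the unnamed resource
 * reservation entries that match the context's app id and device id.
 */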
177 static int32_t
178 bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
179 				struct bnxt_ulp_resource_resv_info *info,
180 				uint32_t num,
181 				struct tf_session_resources *res)
182 {
183 	uint32_t dev_id, res_type, i;
184 	enum tf_dir dir;
185 	uint8_t app_id;
186 	int32_t rc = 0;
187 
188 	if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
189 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
190 		return -EINVAL;
191 	}
192 
193 	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
194 	if (rc) {
195 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
196 		return -EINVAL;
197 	}
198 
199 	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
200 	if (rc) {
201 		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
202 		return -EINVAL;
203 	}
204 
205 	for (i = 0; i < num; i++) {
206 		if (app_id != info[i].app_id || dev_id != info[i].device_id)
207 			continue;
208 		dir = info[i].direction;
209 		res_type = info[i].resource_type;
210 
211 		switch (info[i].resource_func) {
212 		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
213 			res->ident_cnt[dir].cnt[res_type] = info[i].count;
214 			break;
215 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
216 			res->tbl_cnt[dir].cnt[res_type] = info[i].count;
217 			break;
218 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
219 			res->tcam_cnt[dir].cnt[res_type] = info[i].count;
220 			break;
221 		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
222 			res->em_cnt[dir].cnt[res_type] = info[i].count;
223 			break;
224 		default:
225 			break;
226 		}
227 	}
228 	return 0;
229 }
230 
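/* Compute the TF resources needed to open a regular (non-shared) session. */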
231 static int32_t
232 bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
233 			  struct tf_session_resources *res)
234 {
235 	struct bnxt_ulp_resource_resv_info *unnamed = NULL;
236 	uint32_t unum;
237 	int32_t rc = 0;
238 
239 	if (ulp_ctx == NULL || res == NULL) {
240 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
241 		return -EINVAL;
242 	}
243 
244 	unnamed = bnxt_ulp_resource_resv_list_get(&unum);
245 	if (unnamed == NULL) {
246 		BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
247 		return -EINVAL;
248 	}
249 
250 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
251 	if (rc)
252 		BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
253 
254 	return rc;
255 }
256 
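/*
 * Compute the TF resources for the shared session: the unnamed baseline
 * counts plus the named (app global) resources.
 */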
257 static int32_t
258 bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
259 					 struct tf_session_resources *res)
260 {
261 	struct bnxt_ulp_resource_resv_info *unnamed;
262 	struct bnxt_ulp_glb_resource_info *named;
263 	uint32_t unum, nnum;
264 	int32_t rc;
265 
266 	if (ulp_ctx == NULL || res == NULL) {
267 		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
268 		return -EINVAL;
269 	}
270 
271 	/* Make sure the resources are zero before accumulating. */
272 	memset(res, 0, sizeof(struct tf_session_resources));
273 
274 	/*
275 	 * Shared resources comprise both named and unnamed resources.
276 	 * First get the unnamed counts, and then add the named to the result.
277 	 */
278 	/* Get the baseline counts */
279 	unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
280 	if (unnamed == NULL) {
281 		BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
282 		return -EINVAL;
283 	}
284 	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
285 	if (rc) {
286 		BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
287 		return -EINVAL;
288 	}
289 
290 	/* Get the named list and add the totals */
291 	named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
292 	if (named == NULL) {
293 		BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
294 		return -EINVAL;
295 	}
296 	rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
297 	if (rc)
298 		BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
299 
300 	return rc;
301 }
302 
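/*
 * Initialize the ULP context flags from the application capabilities
 * table for the given app id and device id.
 */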
303 int32_t
304 bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
305 			     uint8_t app_id, uint32_t dev_id)
306 {
307 	struct bnxt_ulp_app_capabilities_info *info;
308 	uint32_t num = 0;
309 	uint16_t i;
310 	bool found = false;
311 
312 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
313 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
314 			    app_id, dev_id);
315 		return -EINVAL;
316 	}
317 
318 	info = bnxt_ulp_app_cap_list_get(&num);
319 	if (!info || !num) {
320 		BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
321 		return -EINVAL;
322 	}
323 
324 	for (i = 0; i < num; i++) {
325 		if (info[i].app_id != app_id || info[i].device_id != dev_id)
326 			continue;
327 		found = true;
328 		if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
329 			ulp_ctx->cfg_data->ulp_flags |=
330 				BNXT_ULP_SHARED_SESSION_ENABLED;
331 		if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
332 			ulp_ctx->cfg_data->ulp_flags |=
333 				BNXT_ULP_HIGH_AVAIL_ENABLED;
334 		if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
335 			ulp_ctx->cfg_data->ulp_flags |=
336 				BNXT_ULP_APP_UNICAST_ONLY;
337 	}
338 	if (!found) {
339 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
340 			    app_id, dev_id);
341 		ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
342 		return -EINVAL;
343 	}
344 
345 	return 0;
346 }
347 
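/* Close the shared TF session, if one is in use by this context. */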
348 static void
349 ulp_ctx_shared_session_close(struct bnxt *bp,
350 			     struct bnxt_ulp_session_state *session)
351 {
352 	struct tf *tfp;
353 	int32_t rc;
354 
355 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
356 		return;
357 
358 	tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
359 	if (!tfp) {
360 		/*
361 		 * Log it under debug since this is likely a case of the
362 		 * shared session not being created.  For example, a failed
363 		 * initialization.
364 		 */
365 		BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
366 		return;
367 	}
368 	rc = tf_close_session(tfp);
369 	if (rc)
370 		BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
371 			    rc);
372 	(void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
373 
374 	session->g_shared_tfp.session = NULL;
375 }
376 
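/*
 * Open (or attach to) the shared TF session and record it in the
 * session state and the ulp context.
 */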
377 static int32_t
378 ulp_ctx_shared_session_open(struct bnxt *bp,
379 			    struct bnxt_ulp_session_state *session)
380 {
381 	struct rte_eth_dev *ethdev = bp->eth_dev;
382 	struct tf_session_resources *resources;
383 	struct tf_open_session_parms parms;
384 	size_t copy_nbytes;
385 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
386 	int32_t	rc = 0;
387 
388 	/* only perform this if shared session is enabled. */
389 	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
390 		return 0;
391 
392 	memset(&parms, 0, sizeof(parms));
393 
394 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
395 					  parms.ctrl_chan_name);
396 	if (rc) {
397 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
398 			    ethdev->data->port_id, rc);
399 		return rc;
400 	}
401 	resources = &parms.resources;
402 
403 	/*
404 	 * Compute the remaining space in ctrl_chan_name, reserving one byte
405 	 * for the NUL terminator.
406 	 */
407 	copy_nbytes = sizeof(parms.ctrl_chan_name) -
408 		strlen(parms.ctrl_chan_name) - 1;
409 
410 	/*
411 	 * Build the ctrl_chan_name with shared token.
412 	 * When HA is enabled, the WC TCAM needs extra management by the core,
413 	 * so add the wc_tcam string to the control channel.
414 	 */
415 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
416 		strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
417 			copy_nbytes);
418 	else
419 		strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
420 
421 	rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
422 	if (rc)
423 		return rc;
424 
425 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
426 	if (rc) {
427 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
428 		return rc;
429 	}
430 
431 	switch (ulp_dev_id) {
432 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
433 		parms.device_type = TF_DEVICE_TYPE_WH;
434 		break;
435 	case BNXT_ULP_DEVICE_ID_STINGRAY:
436 		parms.device_type = TF_DEVICE_TYPE_SR;
437 		break;
438 	case BNXT_ULP_DEVICE_ID_THOR:
439 		parms.device_type = TF_DEVICE_TYPE_THOR;
440 		break;
441 	default:
442 		BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
443 		return -EINVAL;
444 	}
445 
446 	parms.shadow_copy = true;
447 	parms.bp = bp;
448 
449 	/*
450 	 * Open the session here, but collect the resources during the
451 	 * mapper initialization.
452 	 */
453 	rc = tf_open_session(&bp->tfp_shared, &parms);
454 	if (rc)
455 		return rc;
456 
457 	if (parms.shared_session_creator)
458 		BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
459 	else
460 		BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
461 
462 	/* Save the shared session in global data */
463 	if (!session->g_shared_tfp.session)
464 		session->g_shared_tfp.session = bp->tfp_shared.session;
465 
466 	rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
467 	if (rc)
468 		BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
469 
470 	return rc;
471 }
472 
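/* Attach this port to an already created shared TF session. */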
473 static int32_t
474 ulp_ctx_shared_session_attach(struct bnxt *bp,
475 			      struct bnxt_ulp_session_state *session)
476 {
477 	int32_t rc = 0;
478 
479 	/* Simply return success if shared session not enabled */
480 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
481 		bp->tfp_shared.session = session->g_shared_tfp.session;
482 		rc = ulp_ctx_shared_session_open(bp, session);
483 	}
484 
485 	return rc;
486 }
487 
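/* Detach this port from the shared TF session by closing its handle. */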
488 static void
489 ulp_ctx_shared_session_detach(struct bnxt *bp)
490 {
491 	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
492 		if (bp->tfp_shared.session) {
493 			tf_close_session(&bp->tfp_shared);
494 			bp->tfp_shared.session = NULL;
495 		}
496 	}
497 }
498 
499 /*
500  * Initialize an ULP session.
501  * An ULP session will contain all the resources needed to support rte flow
502  * offloads. A session is initialized as part of rte_eth_device start.
503  * A single vswitch instance can have multiple uplinks which means
504  * rte_eth_device start will be called for each of these devices.
505  * ULP session manager will make sure that a single ULP session is only
506  * initialized once. Apart from this, it also initializes MARK database,
507  * EEM table & flow database. ULP session manager also manages a list of
508  * all opened ULP sessions.
509  */
510 static int32_t
511 ulp_ctx_session_open(struct bnxt *bp,
512 		     struct bnxt_ulp_session_state *session)
513 {
514 	struct rte_eth_dev		*ethdev = bp->eth_dev;
515 	int32_t				rc = 0;
516 	struct tf_open_session_parms	params;
517 	struct tf_session_resources	*resources;
518 	uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
519 
520 	memset(&params, 0, sizeof(params));
521 
522 	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
523 					  params.ctrl_chan_name);
524 	if (rc) {
525 		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
526 			    ethdev->data->port_id, rc);
527 		return rc;
528 	}
529 
530 	params.shadow_copy = true;
531 
532 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
533 	if (rc) {
534 		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
535 		return rc;
536 	}
537 
538 	switch (ulp_dev_id) {
539 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
540 		params.device_type = TF_DEVICE_TYPE_WH;
541 		break;
542 	case BNXT_ULP_DEVICE_ID_STINGRAY:
543 		params.device_type = TF_DEVICE_TYPE_SR;
544 		break;
545 	case BNXT_ULP_DEVICE_ID_THOR:
546 		params.device_type = TF_DEVICE_TYPE_THOR;
547 		break;
548 	default:
549 		BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
550 		return -EINVAL;
551 	}
552 
553 	resources = &params.resources;
554 	rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
555 	if (rc)
556 		return rc;
557 
558 	params.bp = bp;
559 	rc = tf_open_session(&bp->tfp, &params);
560 	if (rc) {
561 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
562 			    params.ctrl_chan_name, rc);
563 		return -EINVAL;
564 	}
565 	if (!session->session_opened) {
566 		session->session_opened = 1;
567 		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
568 					     sizeof(struct tf), 0);
		/* Guard against allocation failure before dereferencing */
		if (session->g_tfp == NULL) {
			BNXT_TF_DBG(ERR, "Failed to allocate session tfp\n");
			return -ENOMEM;
		}
569 		session->g_tfp->session = bp->tfp.session;
570 	}
571 	return rc;
572 }
573 
574 /*
575  * Close the ULP session.
576  * It takes the ulp context pointer.
577  */
578 static void
579 ulp_ctx_session_close(struct bnxt *bp,
580 		      struct bnxt_ulp_session_state *session)
581 {
582 	/* close the session in the hardware */
583 	if (session->session_opened)
584 		tf_close_session(&bp->tfp);
585 	session->session_opened = 0;
586 	rte_free(session->g_tfp);
587 	session->g_tfp = NULL;
588 }
589 
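/*
 * Populate the EEM table scope allocation parameters from the device
 * parameters, falling back to defaults when they are unavailable.
 */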
590 static void
591 bnxt_init_tbl_scope_parms(struct bnxt *bp,
592 			  struct tf_alloc_tbl_scope_parms *params)
593 {
594 	struct bnxt_ulp_device_params	*dparms;
595 	uint32_t dev_id;
596 	int rc;
597 
598 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
599 	if (rc)
600 		/* TBD: For now, just use default. */
601 		dparms = NULL;
602 	else
603 		dparms = bnxt_ulp_device_params_get(dev_id);
604 
605 	/*
606 	 * Set the flush timer for EEM entries. The value is in 100ms intervals,
607 	 * so 100 is 10s.
608 	 */
609 	params->hw_flow_cache_flush_timer = 100;
610 
611 	if (!dparms) {
612 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
613 		params->rx_max_action_entry_sz_in_bits =
614 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
615 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
616 		params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
617 
618 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
619 		params->tx_max_action_entry_sz_in_bits =
620 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
621 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
622 		params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
623 	} else {
624 		params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
625 		params->rx_max_action_entry_sz_in_bits =
626 			BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
627 		params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
628 		params->rx_num_flows_in_k =
629 			dparms->ext_flow_db_num_entries / 1024;
630 
631 		params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
632 		params->tx_max_action_entry_sz_in_bits =
633 			BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
634 		params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
635 		params->tx_num_flows_in_k =
636 			dparms->ext_flow_db_num_entries / 1024;
637 	}
638 	BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
639 		    params->rx_num_flows_in_k);
640 }
641 
642 /* Initialize Extended Exact Match host memory. */
643 static int32_t
644 ulp_eem_tbl_scope_init(struct bnxt *bp)
645 {
646 	struct tf_alloc_tbl_scope_parms params = {0};
647 	struct bnxt_ulp_device_params *dparms;
648 	enum bnxt_ulp_flow_mem_type mtype;
649 	uint32_t dev_id;
650 	int rc;
651 
652 	/* Get the dev specific number of flows that need to be supported. */
653 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
654 		BNXT_TF_DBG(ERR, "Invalid device id\n");
655 		return -EINVAL;
656 	}
657 
658 	dparms = bnxt_ulp_device_params_get(dev_id);
659 	if (!dparms) {
660 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
661 		return -ENODEV;
662 	}
663 
664 	if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
665 		return -EINVAL;
666 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
667 		BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
668 		return 0;
669 	}
670 
671 	bnxt_init_tbl_scope_parms(bp, &params);
672 	rc = tf_alloc_tbl_scope(&bp->tfp, &params);
673 	if (rc) {
674 		BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
675 			    rc);
676 		return rc;
677 	}
678 	rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
679 	if (rc) {
680 		BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
681 		return rc;
682 	}
683 
684 	return 0;
685 }
686 
687 /* Free Extended Exact Match host memory */
688 static int32_t
689 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
690 {
691 	struct tf_free_tbl_scope_parms	params = {0};
692 	struct tf			*tfp;
693 	int32_t				rc = 0;
694 	struct bnxt_ulp_device_params *dparms;
695 	enum bnxt_ulp_flow_mem_type mtype;
696 	uint32_t dev_id;
697 
698 	if (!ulp_ctx || !ulp_ctx->cfg_data)
699 		return -EINVAL;
700 
701 	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
702 	if (!tfp) {
703 		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
704 		return -EINVAL;
705 	}
706 
707 	/* Get the dev specific number of flows that need to be supported. */
708 	if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
709 		BNXT_TF_DBG(ERR, "Invalid device id\n");
710 		return -EINVAL;
711 	}
712 
713 	dparms = bnxt_ulp_device_params_get(dev_id);
714 	if (!dparms) {
715 		BNXT_TF_DBG(ERR, "could not fetch the device params\n");
716 		return -ENODEV;
717 	}
718 
719 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
720 		return -EINVAL;
721 	if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
722 		BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
723 		return 0;
724 	}
725 
726 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
727 	if (rc) {
728 		BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
729 		return -EINVAL;
730 	}
731 
732 	rc = tf_free_tbl_scope(tfp, &params);
733 	if (rc) {
734 		BNXT_TF_DBG(ERR, "Unable to free table scope\n");
735 		return -EINVAL;
736 	}
737 	return rc;
738 }
739 
740 /* The function to free and deinit the ulp context data. */
741 static int32_t
742 ulp_ctx_deinit(struct bnxt *bp,
743 	       struct bnxt_ulp_session_state *session)
744 {
745 	/* close the tf session */
746 	ulp_ctx_session_close(bp, session);
747 
748 	/* The shared session must be closed last. */
749 	ulp_ctx_shared_session_close(bp, session);
750 
751 	/* Free the contents */
752 	if (session->cfg_data) {
753 		rte_free(session->cfg_data);
754 		bp->ulp_ctx->cfg_data = NULL;
755 		session->cfg_data = NULL;
756 	}
757 	return 0;
758 }
759 
760 /* The function to allocate and initialize the ulp context data. */
761 static int32_t
762 ulp_ctx_init(struct bnxt *bp,
763 	     struct bnxt_ulp_session_state *session)
764 {
765 	struct bnxt_ulp_data	*ulp_data;
766 	int32_t			rc = 0;
767 	enum bnxt_ulp_device_id devid;
768 
769 	/* Initialize the context entries list */
770 	bnxt_ulp_cntxt_list_init();
771 
772 	/* Add the context to the context entries list */
773 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
774 	if (rc) {
775 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
776 		return -ENOMEM;
777 	}
778 
779 	/* Allocate memory to hold ulp context data. */
780 	ulp_data = rte_zmalloc("bnxt_ulp_data",
781 			       sizeof(struct bnxt_ulp_data), 0);
782 	if (!ulp_data) {
783 		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
784 		return -ENOMEM;
785 	}
786 
787 	/* Increment the ulp context data reference count usage. */
788 	bp->ulp_ctx->cfg_data = ulp_data;
789 	session->cfg_data = ulp_data;
790 	ulp_data->ref_cnt++;
791 	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
792 
793 	rc = bnxt_ulp_devid_get(bp, &devid);
794 	if (rc) {
795 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
796 		goto error_deinit;
797 	}
798 
799 	rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
800 	if (rc) {
801 		BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
802 		goto error_deinit;
803 	}
804 
805 	rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
806 	if (rc) {
807 		BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
808 		goto error_deinit;
809 	}
810 
811 	rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
812 	if (rc) {
813 		BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
814 			    bp->app_id, devid);
815 		goto error_deinit;
816 	}
817 
818 	if (devid == BNXT_ULP_DEVICE_ID_THOR) {
819 		ulp_data->ulp_flags &= ~BNXT_ULP_VF_REP_ENABLED;
820 		BNXT_TF_DBG(INFO, "Enabled non-VFR mode\n");
821 	}
822 
823 	/*
824 	 * Shared session must be created before first regular session but after
825 	 * the ulp_ctx is valid.
826 	 */
827 	rc = ulp_ctx_shared_session_open(bp, session);
828 	if (rc) {
829 		BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
830 		goto error_deinit;
831 	}
832 
833 	/* Open the ulp session. */
834 	rc = ulp_ctx_session_open(bp, session);
835 	if (rc)
836 		goto error_deinit;
837 
838 	ulp_tun_tbl_init(ulp_data->tun_tbl);
839 
840 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
841 	return rc;
842 
843 error_deinit:
844 	session->session_opened = 1;
845 	(void)ulp_ctx_deinit(bp, session);
846 	return rc;
847 }
848 
849 /* The function to initialize ulp dparms with devargs */
850 static int32_t
851 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
852 {
853 	struct bnxt_ulp_device_params *dparms;
854 	uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
855 
856 	if (!bp->max_num_kflows) {
857 		/* Defaults to Internal */
858 		bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
859 					    BNXT_ULP_FLOW_MEM_TYPE_INT);
860 		return 0;
861 	}
862 
863 	/* The max_num_kflows was set, so move to external */
864 	if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
865 		return -EINVAL;
866 
867 	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
868 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
869 		return -EINVAL;
870 	}
871 
872 	dparms = bnxt_ulp_device_params_get(dev_id);
873 	if (!dparms) {
874 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
875 		return -EINVAL;
876 	}
877 
878 	/* num_flows = max_num_kflows * 1024 */
879 	dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
880 	/* GFID =  2 * num_flows */
881 	dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
882 	BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
883 		    dparms->ext_flow_db_num_entries);
884 
885 	return 0;
886 }
887 
888 /* The function to initialize bp flags with truflow features */
889 static int32_t
890 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
891 				struct bnxt_ulp_context *ulp_ctx)
892 {
893 	enum bnxt_ulp_flow_mem_type mtype;
894 
895 	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
896 		return -EINVAL;
897 	/* Update the bp flag with gfid flag */
898 	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
899 		bp->flags |= BNXT_FLAG_GFID_ENABLE;
900 
901 	return 0;
902 }
903 
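/*
 * Attach a port to an already initialized ULP context and open a
 * TF session client for it.
 */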
904 static int32_t
905 ulp_ctx_attach(struct bnxt *bp,
906 	       struct bnxt_ulp_session_state *session)
907 {
908 	int32_t rc = 0;
909 	uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
910 	uint8_t app_id;
911 
912 	/* Increment the ulp context data reference count usage. */
913 	bp->ulp_ctx->cfg_data = session->cfg_data;
914 	bp->ulp_ctx->cfg_data->ref_cnt++;
915 
916 	/* update the session details in bnxt tfp */
917 	bp->tfp.session = session->g_tfp->session;
918 
919 	/* Add the context to the context entries list */
920 	rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
921 	if (rc) {
922 		BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
923 		return -EINVAL;
924 	}
925 
926 	/*
927 	 * The supported flag will be set during the init. Use it now to
928 	 * know if we should go through the attach.
929 	 */
930 	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
931 	if (rc) {
932 		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
933 		return -EINVAL;
934 	}
935 
936 	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
937 	if (rc) {
938 		BNXT_TF_DBG(ERR, "Unable to get the dev_id.\n");
939 		return -EINVAL;
940 	}
941 
942 	flags = bp->ulp_ctx->cfg_data->ulp_flags;
943 	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
944 		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
945 			    app_id, dev_id);
946 		return -EINVAL;
947 	}
948 
949 	/* Create a TF Client */
950 	rc = ulp_ctx_session_open(bp, session);
951 	if (rc) {
952 		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
953 		bp->tfp.session = NULL;
954 		return rc;
955 	}
956 
957 	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
958 	return rc;
959 }
960 
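/* Detach a port from the ULP context by closing its TF session. */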
961 static void
962 ulp_ctx_detach(struct bnxt *bp)
963 {
964 	if (bp->tfp.session) {
965 		tf_close_session(&bp->tfp);
966 		bp->tfp.session = NULL;
967 	}
968 }
969 
970 /*
971  * Initialize the state of an ULP session.
972  * If the state of an ULP session is not initialized, set its state to
973  * initialized. If the state is already initialized, do nothing.
974  */
975 static void
976 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
977 {
978 	pthread_mutex_lock(&session->bnxt_ulp_mutex);
979 
980 	if (!session->bnxt_ulp_init) {
981 		session->bnxt_ulp_init = true;
982 		*init = false;
983 	} else {
984 		*init = true;
985 	}
986 
987 	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
988 }
989 
990 /*
991  * Check if an ULP session is already allocated for a specific PCI
992  * domain & bus. If it is already allocated, simply return the session
993  * pointer; otherwise, return NULL.
994  */
995 static struct bnxt_ulp_session_state *
996 ulp_get_session(struct rte_pci_addr *pci_addr)
997 {
998 	struct bnxt_ulp_session_state *session;
999 
1000 	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
1001 		if (session->pci_info.domain == pci_addr->domain &&
1002 		    session->pci_info.bus == pci_addr->bus) {
1003 			return session;
1004 		}
1005 	}
1006 	return NULL;
1007 }
1008 
1009 /*
1010  * Allocate and initialize an ULP session and set its state to INITIALIZED.
1011  * If it's already initialized simply return the already existing session.
1012  */
1013 static struct bnxt_ulp_session_state *
1014 ulp_session_init(struct bnxt *bp,
1015 		 bool *init)
1016 {
1017 	struct rte_pci_device		*pci_dev;
1018 	struct rte_pci_addr		*pci_addr;
1019 	struct bnxt_ulp_session_state	*session;
1020 	int rc = 0;
1021 
1022 	if (!bp)
1023 		return NULL;
1024 
1025 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1026 	pci_addr = &pci_dev->addr;
1027 
1028 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1029 
1030 	session = ulp_get_session(pci_addr);
1031 	if (!session) {
1032 		/* Session not found, allocate a new one */
1033 		session = rte_zmalloc("bnxt_ulp_session",
1034 				      sizeof(struct bnxt_ulp_session_state),
1035 				      0);
1036 		if (!session) {
1037 			BNXT_TF_DBG(ERR,
1038 				    "Allocation failed for bnxt_ulp_session\n");
1039 			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1040 			return NULL;
1041 
1042 		} else {
1043 			/* Add it to the queue */
1044 			session->pci_info.domain = pci_addr->domain;
1045 			session->pci_info.bus = pci_addr->bus;
1046 			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
1047 			if (rc) {
1048 				BNXT_TF_DBG(ERR, "mutex create failed\n");
1049 				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1050 				return NULL;
1051 			}
1052 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
1053 					   session, next);
1054 		}
1055 	}
1056 	ulp_context_initialized(session, init);
1057 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1058 	return session;
1059 }
1060 
1061 /*
1062  * When a device is closed, remove its associated session from the global
1063  * session list.
1064  */
1065 static void
1066 ulp_session_deinit(struct bnxt_ulp_session_state *session)
1067 {
1068 	if (!session)
1069 		return;
1070 
1071 	if (!session->cfg_data) {
1072 		pthread_mutex_lock(&bnxt_ulp_global_mutex);
1073 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
1074 			      bnxt_ulp_session_state, next);
1075 		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
1076 		rte_free(session);
1077 		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1078 	}
1079 }
1080 
1081 /*
1082  * Internal API to enable the NAT feature.
1083  * Set set_flag to 1 to set the value or to 0 to reset it.
1084  * Returns 0 on success.
1085  */
1086 static int32_t
1087 bnxt_ulp_global_cfg_update(struct bnxt *bp,
1088 			   enum tf_dir dir,
1089 			   enum tf_global_config_type type,
1090 			   uint32_t offset,
1091 			   uint32_t value,
1092 			   uint32_t set_flag)
1093 {
1094 	uint32_t global_cfg = 0;
1095 	int rc;
1096 	struct tf_global_cfg_parms parms = { 0 };
1097 
1098 	/* Initialize the params */
1099 	parms.dir = dir;
1100 	parms.type = type;
1101 	parms.offset = offset;
1102 	parms.config = (uint8_t *)&global_cfg;
1103 	parms.config_sz_in_bytes = sizeof(global_cfg);
1104 
1105 	rc = tf_get_global_cfg(&bp->tfp, &parms);
1106 	if (rc) {
1107 		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
1108 			    type, rc);
1109 		return rc;
1110 	}
1111 
1112 	if (set_flag)
1113 		global_cfg |= value;
1114 	else
1115 		global_cfg &= ~value;
1116 
1117 	/* SET the register RE_CFA_REG_ACT_TECT */
1118 	rc = tf_set_global_cfg(&bp->tfp, &parms);
1119 	if (rc) {
1120 		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
1121 			    type, rc);
1122 		return rc;
1123 	}
1124 	return rc;
1125 }
1126 
1127 /* Internal function to delete all the flows belonging to the given port */
1128 static void
1129 bnxt_ulp_flush_port_flows(struct bnxt *bp)
1130 {
1131 	uint16_t func_id;
1132 
1133 	/* it is assumed that port is either TVF or PF */
1134 	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
1135 					 bp->eth_dev->data->port_id,
1136 					 &func_id)) {
1137 		BNXT_TF_DBG(ERR, "Invalid argument\n");
1138 		return;
1139 	}
1140 	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
1141 }
1142 
1143 /* Internal function to delete the VFR default flows */
1144 static void
1145 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
1146 {
1147 	struct bnxt_ulp_vfr_rule_info *info;
1148 	uint16_t port_id;
1149 	struct rte_eth_dev *vfr_eth_dev;
1150 	struct bnxt_representor *vfr_bp;
1151 
1152 	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
1153 		return;
1154 
1155 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1156 		return;
1157 
1158 	/* Delete default rules for all ports */
1159 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
1160 		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
1161 		if (!info->valid)
1162 			continue;
1163 
1164 		if (!global && info->parent_port_id !=
1165 		    bp->eth_dev->data->port_id)
1166 			continue;
1167 
1168 		/* Destroy the flows */
1169 		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
1170 		/* Clean up the tx action pointer */
1171 		vfr_eth_dev = &rte_eth_devices[port_id];
1172 		if (vfr_eth_dev) {
1173 			vfr_bp = vfr_eth_dev->data->dev_private;
1174 			vfr_bp->vfr_tx_cfa_action = 0;
1175 		}
1176 		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
1177 	}
1178 }
1179 
1180 /*
1181  * When a port is de-initialized by DPDK, this function is called
1182  * to clear the ULP context and the rest of the infrastructure
1183  * associated with it.
1184  */
1185 static void
1186 bnxt_ulp_deinit(struct bnxt *bp,
1187 		struct bnxt_ulp_session_state *session)
1188 {
1189 	bool ha_enabled;
1190 
1191 	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1192 		return;
1193 
1194 	ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
1195 	if (ha_enabled && session->session_opened) {
1196 		int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
1197 		if (rc)
1198 			BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
1199 	}
1200 
1201 	/* clean up default flows */
1202 	bnxt_ulp_destroy_df_rules(bp, true);
1203 
1204 	/* clean up default VFR flows */
1205 	bnxt_ulp_destroy_vfr_default_rules(bp, true);
1206 
1207 	/* clean up regular flows */
1208 	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
1209 
1210 	/* cleanup the eem table scope */
1211 	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
1212 
1213 	/* cleanup the flow database */
1214 	ulp_flow_db_deinit(bp->ulp_ctx);
1215 
1216 	/* Delete the Mark database */
1217 	ulp_mark_db_deinit(bp->ulp_ctx);
1218 
1219 	/* cleanup the ulp mapper */
1220 	ulp_mapper_deinit(bp->ulp_ctx);
1221 
1222 	/* Delete the Flow Counter Manager */
1223 	ulp_fc_mgr_deinit(bp->ulp_ctx);
1224 
1225 	/* Delete the Port database */
1226 	ulp_port_db_deinit(bp->ulp_ctx);
1227 
1228 	/* Disable NAT feature */
1229 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1230 					 TF_TUNNEL_ENCAP_NAT,
1231 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1232 
1233 	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1234 					 TF_TUNNEL_ENCAP_NAT,
1235 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1236 
1237 	/* free the flow db lock */
1238 	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
1239 
1240 	if (ha_enabled)
1241 		ulp_ha_mgr_deinit(bp->ulp_ctx);
1242 
1243 	/* Delete the ulp context and tf session and free the ulp context */
1244 	ulp_ctx_deinit(bp, session);
1245 	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
1246 }
1247 
1248 /*
1249  * When a port is initialized by DPDK, this function is called
1250  * to initialize the ULP context and the rest of the infrastructure
1251  * associated with it.
1252  */
1253 static int32_t
1254 bnxt_ulp_init(struct bnxt *bp,
1255 	      struct bnxt_ulp_session_state *session)
1256 {
1257 	int rc;
1258 
1259 	/* Allocate and Initialize the ulp context. */
1260 	rc = ulp_ctx_init(bp, session);
1261 	if (rc) {
1262 		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
1263 		goto jump_to_error;
1264 	}
1265 
1266 	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
1267 	if (rc) {
1268 		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
1269 		goto jump_to_error;
1270 	}
1271 
1272 	/* Initialize ulp dparms with values devargs passed */
1273 	rc = ulp_dparms_init(bp, bp->ulp_ctx);
1274 	if (rc) {
1275 		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1276 		goto jump_to_error;
1277 	}
1278 
1279 	/* create the port database */
1280 	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1281 	if (rc) {
1282 		BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1283 		goto jump_to_error;
1284 	}
1285 
1286 	/* Create the Mark database. */
1287 	rc = ulp_mark_db_init(bp->ulp_ctx);
1288 	if (rc) {
1289 		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1290 		goto jump_to_error;
1291 	}
1292 
1293 	/* Create the flow database. */
1294 	rc = ulp_flow_db_init(bp->ulp_ctx);
1295 	if (rc) {
1296 		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1297 		goto jump_to_error;
1298 	}
1299 
1300 	/* Create the eem table scope. */
1301 	rc = ulp_eem_tbl_scope_init(bp);
1302 	if (rc) {
1303 		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1304 		goto jump_to_error;
1305 	}
1306 
1307 	rc = ulp_mapper_init(bp->ulp_ctx);
1308 	if (rc) {
1309 		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1310 		goto jump_to_error;
1311 	}
1312 
1313 	rc = ulp_fc_mgr_init(bp->ulp_ctx);
1314 	if (rc) {
1315 		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1316 		goto jump_to_error;
1317 	}
1318 
1319 	/*
1320 	 * Enable NAT feature. Set the global configuration register
1321 	 * Tunnel encap to enable NAT with the reuse of existing inner
1322 	 * L2 header smac and dmac
1323 	 */
1324 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1325 					TF_TUNNEL_ENCAP_NAT,
1326 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1327 	if (rc) {
1328 		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1329 		goto jump_to_error;
1330 	}
1331 
1332 	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1333 					TF_TUNNEL_ENCAP_NAT,
1334 					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1335 	if (rc) {
1336 		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1337 		goto jump_to_error;
1338 	}
1339 
1340 	if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
1341 		rc = ulp_ha_mgr_init(bp->ulp_ctx);
1342 		if (rc) {
1343 			BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
1344 			goto jump_to_error;
1345 		}
1346 		rc = ulp_ha_mgr_open(bp->ulp_ctx);
1347 		if (rc) {
1348 			BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
1349 			goto jump_to_error;
1350 		}
1351 	}
1352 	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
1353 	return rc;
1354 
1355 jump_to_error:
1356 	bnxt_ulp_deinit(bp, session);
1357 	return rc;
1358 }
1359 
1360 /*
1361  * When a port is initialized by DPDK, this function sets up
1362  * the port-specific details.
1363  */
1364 int32_t
1365 bnxt_ulp_port_init(struct bnxt *bp)
1366 {
1367 	struct bnxt_ulp_session_state *session;
1368 	bool initialized;
1369 	enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
1370 	uint32_t ulp_flags;
1371 	int32_t rc = 0;
1372 
1373 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1374 		BNXT_TF_DBG(ERR,
1375 			    "Skip ulp init for port: %d, not a TVF or PF\n",
1376 			    bp->eth_dev->data->port_id);
1377 		return rc;
1378 	}
1379 
1380 	if (!BNXT_TRUFLOW_EN(bp)) {
1381 		BNXT_TF_DBG(DEBUG,
1382 			    "Skip ulp init for port: %d, truflow is not enabled\n",
1383 			    bp->eth_dev->data->port_id);
1384 		return rc;
1385 	}
1386 
1387 	if (bp->ulp_ctx) {
1388 		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1389 		return rc;
1390 	}
1391 
1392 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1393 				  sizeof(struct bnxt_ulp_context), 0);
1394 	if (!bp->ulp_ctx) {
1395 		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1396 		return -ENOMEM;
1397 	}
1398 
1399 	/*
1400 	 * Multiple uplink ports can be associated with a single vswitch.
1401 	 * Make sure only the port that is started first will initialize
1402 	 * the TF session.
1403 	 */
1404 	session = ulp_session_init(bp, &initialized);
1405 	if (!session) {
1406 		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1407 		rc = -EIO;
1408 		goto jump_to_error;
1409 	}
1410 
1411 	if (initialized) {
1412 		/*
1413 		 * If ULP is already initialized for a specific domain then
1414 		 * simply assign the ulp context to this rte_eth_dev.
1415 		 */
1416 		rc = ulp_ctx_attach(bp, session);
1417 		if (rc) {
1418 			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1419 			goto jump_to_error;
1420 		}
1421 
1422 		/*
1423 		 * Attach to the shared session, must be called after the
1424 		 * ulp_ctx_attach in order to ensure that ulp data is available
1425 		 * for attaching.
1426 		 */
1427 		rc = ulp_ctx_shared_session_attach(bp, session);
1428 		if (rc) {
1429 			BNXT_TF_DBG(ERR,
1430 				    "Failed to attach to the shared session (%d)\n", rc);
1431 			goto jump_to_error;
1432 		}
1433 	} else {
1434 		rc = bnxt_ulp_init(bp, session);
1435 		if (rc) {
1436 			BNXT_TF_DBG(ERR, "Failed to initialize the ulp\n");
1437 			goto jump_to_error;
1438 		}
1439 	}
1440 
1441 	/* Update bnxt driver flags */
1442 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1443 	if (rc) {
1444 		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1445 		goto jump_to_error;
1446 	}
1447 
1448 	/* update the port database for the given interface */
1449 	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1450 	if (rc) {
1451 		BNXT_TF_DBG(ERR, "Failed to update port database\n");
1452 		goto jump_to_error;
1453 	}
1454 	/* create the default rules */
1455 	rc = bnxt_ulp_create_df_rules(bp);
1456 	if (rc) {
1457 		BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1458 		goto jump_to_error;
1459 	}
1460 
1461 	rc = bnxt_ulp_devid_get(bp, &devid);
1462 	if (rc) {
1463 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
1464 		goto jump_to_error;
1465 	}
1466 
1467 	if (devid != BNXT_ULP_DEVICE_ID_THOR && BNXT_ACCUM_STATS_EN(bp))
1468 		bp->ulp_ctx->cfg_data->accum_stats = true;
1469 
1470 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init, accum_stats:%d\n",
1471 		    bp->eth_dev->data->port_id,
1472 		    bp->ulp_ctx->cfg_data->accum_stats);
1473 
1474 	/* set the unicast mode */
1475 	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
1476 		BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
1477 		goto jump_to_error;
1478 	}
1479 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
1480 		if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
1481 			BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
1482 			goto jump_to_error;
1483 		}
1484 	}
1485 
1486 	return rc;
1487 
1488 jump_to_error:
1489 	bnxt_ulp_port_deinit(bp);
1490 	return rc;
1491 }
1492 
1493 /*
1494  * When a port is de-initialized by DPDK, this function cleans up
1495  * the port-specific details.
1496  */
1497 void
1498 bnxt_ulp_port_deinit(struct bnxt *bp)
1499 {
1500 	struct bnxt_ulp_session_state *session;
1501 	struct rte_pci_device *pci_dev;
1502 	struct rte_pci_addr *pci_addr;
1503 
1504 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1505 		BNXT_TF_DBG(ERR,
1506 			    "Skip ULP deinit port:%d, not a TVF or PF\n",
1507 			    bp->eth_dev->data->port_id);
1508 		return;
1509 	}
1510 
1511 	if (!BNXT_TRUFLOW_EN(bp)) {
1512 		BNXT_TF_DBG(DEBUG,
1513 			    "Skip ULP deinit for port:%d, truflow is not enabled\n",
1514 			    bp->eth_dev->data->port_id);
1515 		return;
1516 	}
1517 
1518 	if (!bp->ulp_ctx) {
1519 		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1520 		return;
1521 	}
1522 
1523 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1524 		    bp->eth_dev->data->port_id);
1525 
1526 	/* Free the ulp context in the context entry list */
1527 	bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
1528 
1529 	/* Get the session details  */
1530 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1531 	pci_addr = &pci_dev->addr;
1532 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1533 	session = ulp_get_session(pci_addr);
1534 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1535 
1536 	/* If the session is not found, just exit */
1537 	if (!session) {
1538 		/* Free the ulp context */
1539 		rte_free(bp->ulp_ctx);
1540 		bp->ulp_ctx = NULL;
1541 		return;
1542 	}
1543 
1544 	/* Check the reference count to decide whether to deinit or detach */
1545 	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1546 		bp->ulp_ctx->cfg_data->ref_cnt--;
1547 		if (bp->ulp_ctx->cfg_data->ref_cnt) {
1548 			/* free the port details */
1549 			/* Free the default flow rule associated to this port */
1550 			bnxt_ulp_destroy_df_rules(bp, false);
1551 			bnxt_ulp_destroy_vfr_default_rules(bp, false);
1552 
1553 			/* free flows associated with this port */
1554 			bnxt_ulp_flush_port_flows(bp);
1555 
1556 			/* close the session associated with this port */
1557 			ulp_ctx_detach(bp);
1558 
1559 			/* always detach/close shared after the session. */
1560 			ulp_ctx_shared_session_detach(bp);
1561 		} else {
1562 			/* Perform ulp ctx deinit */
1563 			bnxt_ulp_deinit(bp, session);
1564 		}
1565 	}
1566 
1567 	/* clean up the session */
1568 	ulp_session_deinit(session);
1569 
1570 	/* Free the ulp context */
1571 	rte_free(bp->ulp_ctx);
1572 	bp->ulp_ctx = NULL;
1573 }
1574 
1575 /* Below are the accessor functions for the internal data of the ulp context. */
1576 /* Function to set the Mark DB into the context */
1577 int32_t
1578 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1579 				struct bnxt_ulp_mark_tbl *mark_tbl)
1580 {
1581 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1582 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1583 		return -EINVAL;
1584 	}
1585 
1586 	ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1587 
1588 	return 0;
1589 }
1590 
1591 /* Function to retrieve the Mark DB from the context. */
1592 struct bnxt_ulp_mark_tbl *
1593 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1594 {
1595 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1596 		return NULL;
1597 
1598 	return ulp_ctx->cfg_data->mark_tbl;
1599 }
1600 
1601 bool
1602 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1603 {
1604 	return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1605 }
1606 
1607 int32_t
1608 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1609 {
1610 	if (!ulp_ctx)
1611 		return -EINVAL;
1612 	ulp_ctx->cfg_data->app_id = app_id;
1613 	return 0;
1614 }
1615 
1616 int32_t
1617 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1618 {
1619 	/* Default APP id is zero */
1620 	if (!ulp_ctx || !app_id)
1621 		return -EINVAL;
1622 	*app_id = ulp_ctx->cfg_data->app_id;
1623 	return 0;
1624 }
1625 
1626 /* Function to set the device id of the hardware. */
1627 int32_t
1628 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1629 			  uint32_t dev_id)
1630 {
1631 	if (ulp_ctx && ulp_ctx->cfg_data) {
1632 		ulp_ctx->cfg_data->dev_id = dev_id;
1633 		return 0;
1634 	}
1635 
1636 	return -EINVAL;
1637 }
1638 
1639 /* Function to get the device id of the hardware. */
1640 int32_t
1641 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1642 			  uint32_t *dev_id)
1643 {
1644 	if (ulp_ctx && ulp_ctx->cfg_data) {
1645 		*dev_id = ulp_ctx->cfg_data->dev_id;
1646 		return 0;
1647 	}
1648 	*dev_id = BNXT_ULP_DEVICE_ID_LAST;
1649 	BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1650 	return -EINVAL;
1651 }
1652 
1653 int32_t
1654 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1655 			    enum bnxt_ulp_flow_mem_type mem_type)
1656 {
1657 	if (ulp_ctx && ulp_ctx->cfg_data) {
1658 		ulp_ctx->cfg_data->mem_type = mem_type;
1659 		return 0;
1660 	}
1661 	BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1662 	return -EINVAL;
1663 }
1664 
1665 int32_t
1666 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1667 			    enum bnxt_ulp_flow_mem_type *mem_type)
1668 {
1669 	if (ulp_ctx && ulp_ctx->cfg_data) {
1670 		*mem_type = ulp_ctx->cfg_data->mem_type;
1671 		return 0;
1672 	}
1673 	*mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
1674 	BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1675 	return -EINVAL;
1676 }
1677 
1678 /* Function to get the table scope id of the EEM table. */
1679 int32_t
1680 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1681 				uint32_t *tbl_scope_id)
1682 {
1683 	if (ulp_ctx && ulp_ctx->cfg_data) {
1684 		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1685 		return 0;
1686 	}
1687 
1688 	return -EINVAL;
1689 }
1690 
1691 /* Function to set the table scope id of the EEM table. */
1692 int32_t
1693 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1694 				uint32_t tbl_scope_id)
1695 {
1696 	if (ulp_ctx && ulp_ctx->cfg_data) {
1697 		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1698 		return 0;
1699 	}
1700 
1701 	return -EINVAL;
1702 }
1703 
1704 /* Function to set the shared tfp session details into the ulp context. */
1705 int32_t
1706 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1707 {
1708 	if (!ulp) {
1709 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1710 		return -EINVAL;
1711 	}
1712 
1713 	if (tfp == NULL) {
1714 		if (ulp->cfg_data->num_shared_clients > 0)
1715 			ulp->cfg_data->num_shared_clients--;
1716 	} else {
1717 		ulp->cfg_data->num_shared_clients++;
1718 	}
1719 
1720 	ulp->g_shared_tfp = tfp;
1721 	return 0;
1722 }
1723 
1724 /* Function to get the shared tfp session details from the ulp context. */
1725 struct tf *
1726 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1727 {
1728 	if (!ulp) {
1729 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1730 		return NULL;
1731 	}
1732 	return ulp->g_shared_tfp;
1733 }
1734 
1735 /* Function to get the number of shared clients attached */
1736 uint8_t
1737 bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
1738 {
1739 	if (ulp == NULL || ulp->cfg_data == NULL) {
1740 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1741 		return 0;
1742 	}
1743 	return ulp->cfg_data->num_shared_clients;
1744 }
1745 
1746 /* Function to set the tfp session details into the ulp context. */
1747 int32_t
1748 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1749 {
1750 	if (!ulp) {
1751 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1752 		return -EINVAL;
1753 	}
1754 
1755 	ulp->g_tfp = tfp;
1756 	return 0;
1757 }
1758 
1759 /* Function to get the tfp session details from the ulp context. */
1760 struct tf *
1761 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1762 		       enum bnxt_ulp_shared_session shared)
1763 {
1764 	if (!ulp) {
1765 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1766 		return NULL;
1767 	}
1768 	if (shared)
1769 		return ulp->g_shared_tfp;
1770 	else
1771 		return ulp->g_tfp;
1772 }
1773 
1774 /*
1775  * Get the device table entry based on the device id.
1776  *
1777  * dev_id [in] The device id of the hardware
1778  *
1779  * Returns the pointer to the device parameters.
1780  */
1781 struct bnxt_ulp_device_params *
1782 bnxt_ulp_device_params_get(uint32_t dev_id)
1783 {
1784 	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1785 		return &ulp_device_params[dev_id];
1786 	return NULL;
1787 }
1788 
1789 /* Function to set the flow database to the ulp context. */
1790 int32_t
1791 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context	*ulp_ctx,
1792 				struct bnxt_ulp_flow_db	*flow_db)
1793 {
1794 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1795 		return -EINVAL;
1796 
1797 	ulp_ctx->cfg_data->flow_db = flow_db;
1798 	return 0;
1799 }
1800 
1801 /* Function to get the flow database from the ulp context. */
1802 struct bnxt_ulp_flow_db	*
1803 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx)
1804 {
1805 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1806 		return NULL;
1807 
1808 	return ulp_ctx->cfg_data->flow_db;
1809 }
1810 
1811 /* Function to get the tunnel cache table info from the ulp context. */
1812 struct bnxt_tun_cache_entry *
1813 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1814 {
1815 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1816 		return NULL;
1817 
1818 	return ulp_ctx->cfg_data->tun_tbl;
1819 }
1820 
1821 /* Function to get the ulp context from eth device. */
1822 struct bnxt_ulp_context	*
1823 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev	*dev)
1824 {
1825 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1826 
1827 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1828 		struct bnxt_representor *vfr = dev->data->dev_private;
1829 
1830 		bp = vfr->parent_dev->data->dev_private;
1831 	}
1832 
1833 	if (!bp) {
1834 		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1835 		return NULL;
1836 	}
1837 	return bp->ulp_ctx;
1838 }
1839 
1840 int32_t
1841 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1842 				    void *mapper_data)
1843 {
1844 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1845 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1846 		return -EINVAL;
1847 	}
1848 
1849 	ulp_ctx->cfg_data->mapper_data = mapper_data;
1850 	return 0;
1851 }
1852 
1853 void *
1854 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1855 {
1856 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1857 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1858 		return NULL;
1859 	}
1860 
1861 	return ulp_ctx->cfg_data->mapper_data;
1862 }
1863 
1864 /* Function to set the port database to the ulp context. */
1865 int32_t
1866 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context	*ulp_ctx,
1867 				struct bnxt_ulp_port_db	*port_db)
1868 {
1869 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1870 		return -EINVAL;
1871 
1872 	ulp_ctx->cfg_data->port_db = port_db;
1873 	return 0;
1874 }
1875 
1876 /* Function to get the port database from the ulp context. */
1877 struct bnxt_ulp_port_db *
1878 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context	*ulp_ctx)
1879 {
1880 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1881 		return NULL;
1882 
1883 	return ulp_ctx->cfg_data->port_db;
1884 }
1885 
1886 /* Function to set the flow counter info into the context */
1887 int32_t
1888 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1889 				struct bnxt_ulp_fc_info *ulp_fc_info)
1890 {
1891 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1892 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1893 		return -EINVAL;
1894 	}
1895 
1896 	ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1897 
1898 	return 0;
1899 }
1900 
1901 /* Function to retrieve the flow counter info from the context. */
1902 struct bnxt_ulp_fc_info *
1903 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1904 {
1905 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1906 		return NULL;
1907 
1908 	return ulp_ctx->cfg_data->fc_info;
1909 }
1910 
1911 /* Function to get the ulp flags from the ulp context. */
1912 int32_t
1913 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1914 				  uint32_t *flags)
1915 {
1916 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1917 		return -1;
1918 
1919 	*flags = ulp_ctx->cfg_data->ulp_flags;
1920 	return 0;
1921 }
1922 
1923 /* Function to get the ulp vfr info from the ulp context. */
1924 struct bnxt_ulp_vfr_rule_info*
1925 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1926 				     uint32_t port_id)
1927 {
1928 	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1929 		return NULL;
1930 
1931 	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1932 }
1933 
1934 /* Function to acquire the flow database lock from the ulp context. */
1935 int32_t
1936 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1937 {
1938 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1939 		return -1;
1940 
1941 	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1942 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1943 		return -1;
1944 	}
1945 	return 0;
1946 }
1947 
1948 /* Function to release the flow database lock from the ulp context. */
1949 void
1950 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1951 {
1952 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1953 		return;
1954 
1955 	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
1956 }
1957 
1958 /* Function to set the ha info into the context */
1959 int32_t
1960 bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
1961 				struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
1962 {
1963 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
1964 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1965 		return -EINVAL;
1966 	}
1967 	ulp_ctx->cfg_data->ha_info = ulp_ha_info;
1968 	return 0;
1969 }
1970 
1971 /* Function to retrieve the ha info from the context. */
1972 struct bnxt_ulp_ha_mgr_info *
1973 bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
1974 {
1975 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
1976 		return NULL;
1977 	return ulp_ctx->cfg_data->ha_info;
1978 }
1979 
1980 bool
1981 bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
1982 {
1983 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
1984 		return false;
1985 	return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1986 }
1987 
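/* Initialize the spinlock that protects the global ulp context list. */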
1988 static int32_t
1989 bnxt_ulp_cntxt_list_init(void)
1990 {
1991 	/* Create the cntxt spin lock */
1992 	rte_spinlock_init(&bnxt_ulp_ctxt_lock);
1993 
1994 	return 0;
1995 }
1996 
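/* Add a ulp context to the global ulp context list. */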
1997 static int32_t
1998 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
1999 {
2000 	struct ulp_context_list_entry	*entry;
2001 
2002 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
2003 	if (entry == NULL) {
2004 		BNXT_TF_DBG(ERR, "unable to allocate memory\n");
2005 		return -ENOMEM;
2006 	}
2007 
2008 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2009 	entry->ulp_ctx = ulp_ctx;
2010 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
2011 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2012 	return 0;
2013 }
2014 
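/* Remove a ulp context from the global ulp context list. */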
2015 static void
2016 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
2017 {
2018 	struct ulp_context_list_entry	*entry, *temp;
2019 
2020 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2021 	TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
2022 		if (entry->ulp_ctx == ulp_ctx) {
2023 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
2024 			rte_free(entry);
2025 			break;
2026 		}
2027 	}
2028 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2029 }
2030 
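/*
 * Acquire the context list lock and return the first available ulp context.
 * The caller must call bnxt_ulp_cntxt_entry_release() when done.
 */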
2031 struct bnxt_ulp_context *
2032 bnxt_ulp_cntxt_entry_acquire(void)
2033 {
2034 	struct ulp_context_list_entry	*entry;
2035 
2036 	/* take a lock and get the first ulp context available */
2037 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2038 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2039 		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
2040 			if (entry->ulp_ctx)
2041 				return entry->ulp_ctx;
		/* No context found, release the lock before returning */
		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2042 	}
2043 }
2044 
2045 void
2046 bnxt_ulp_cntxt_entry_release(void)
2047 {
2048 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2049 }
2050